repo_name (stringlengths 7-71) | file_path (stringlengths 5-118) | context (list) | import_statement (stringlengths 45-12.5k) | token_num (int64 641-99.4k) | cropped_code (stringlengths 44-17k) | all_code (stringlengths 43-754k) | next_line (stringlengths 2-330) | gold_snippet_index (int64 0-68) | created_at (stringlengths 25-25) | level (stringclasses 9 values) |
---|---|---|---|---|---|---|---|---|---|---|
DLYuanGod/TinyGPT-V | minigpt4/datasets/builders/image_text_pair_builder.py | [
{
"identifier": "registry",
"path": "minigpt4/common/registry.py",
"snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):"
},
{
"identifier": "BaseDatasetBuilder",
"path": "minigpt4/datasets/builders/base_dataset_builder.py",
"snippet": "class BaseDatasetBuilder:\n train_dataset_cls, eval_dataset_cls = None, None\n\n def __init__(self, cfg=None):\n super().__init__()\n\n if cfg is None:\n # help to create datasets from default config.\n self.config = load_dataset_config(self.default_config_path())\n elif isinstance(cfg, str):\n self.config = load_dataset_config(cfg)\n else:\n # when called from task.build_dataset()\n self.config = cfg\n\n self.data_type = self.config.data_type\n\n self.vis_processors = {\"train\": BaseProcessor(), \"eval\": BaseProcessor()}\n self.text_processors = {\"train\": BaseProcessor(), \"eval\": BaseProcessor()}\n\n def build_datasets(self):\n # download, split, etc...\n # only called on 1 GPU/TPU in distributed\n\n if is_main_process():\n self._download_data()\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n # at this point, all the annotations and image/videos should be all downloaded to the specified locations.\n logging.info(\"Building datasets...\")\n datasets = self.build() # dataset['train'/'val'/'test']\n\n return datasets\n\n def build_processors(self):\n vis_proc_cfg = self.config.get(\"vis_processor\")\n txt_proc_cfg = self.config.get(\"text_processor\")\n\n if vis_proc_cfg is not None:\n vis_train_cfg = vis_proc_cfg.get(\"train\")\n vis_eval_cfg = vis_proc_cfg.get(\"eval\")\n\n self.vis_processors[\"train\"] = self._build_proc_from_cfg(vis_train_cfg)\n self.vis_processors[\"eval\"] = self._build_proc_from_cfg(vis_eval_cfg)\n\n if txt_proc_cfg is not None:\n txt_train_cfg = txt_proc_cfg.get(\"train\")\n txt_eval_cfg = txt_proc_cfg.get(\"eval\")\n\n self.text_processors[\"train\"] = self._build_proc_from_cfg(txt_train_cfg)\n self.text_processors[\"eval\"] = self._build_proc_from_cfg(txt_eval_cfg)\n\n @staticmethod\n def _build_proc_from_cfg(cfg):\n return (\n registry.get_processor_class(cfg.name).from_config(cfg)\n if cfg is not None\n else None\n )\n\n @classmethod\n def default_config_path(cls, type=\"default\"):\n return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type])\n\n def _download_data(self):\n self._download_ann()\n self._download_vis()\n\n def _download_ann(self):\n \"\"\"\n Download annotation files if necessary.\n All the vision-language datasets should have annotations of unified format.\n\n storage_path can be:\n (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative.\n (2) basename/dirname: will be suffixed with base name of URL if dirname is provided.\n\n Local annotation paths should be relative.\n \"\"\"\n anns = self.config.build_info.annotations\n\n splits = anns.keys()\n\n cache_root = registry.get_path(\"cache_root\")\n\n for split in splits:\n info = anns[split]\n\n urls, storage_paths = info.get(\"url\", None), info.storage\n\n if isinstance(urls, str):\n urls = [urls]\n if isinstance(storage_paths, str):\n storage_paths = [storage_paths]\n\n assert len(urls) == len(storage_paths)\n\n for url_or_filename, storage_path in zip(urls, storage_paths):\n # if storage_path is relative, make it full by prefixing with cache_root.\n if not os.path.isabs(storage_path):\n storage_path = os.path.join(cache_root, storage_path)\n\n dirname = os.path.dirname(storage_path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n if os.path.isfile(url_or_filename):\n src, dst = url_or_filename, storage_path\n if not os.path.exists(dst):\n shutil.copyfile(src=src, dst=dst)\n else:\n logging.info(\"Using existing file {}.\".format(dst))\n else:\n if os.path.isdir(storage_path):\n # if only dirname is provided, suffix 
with basename of URL.\n raise ValueError(\n \"Expecting storage_path to be a file path, got directory {}\".format(\n storage_path\n )\n )\n else:\n filename = os.path.basename(storage_path)\n\n download_url(url=url_or_filename, root=dirname, filename=filename)\n\n def _download_vis(self):\n\n storage_path = self.config.build_info.get(self.data_type).storage\n storage_path = utils.get_cache_path(storage_path)\n\n if not os.path.exists(storage_path):\n warnings.warn(\n f\"\"\"\n The specified path {storage_path} for visual inputs does not exist.\n Please provide a correct path to the visual inputs or\n refer to datasets/download_scripts/README.md for downloading instructions.\n \"\"\"\n )\n\n def build(self):\n \"\"\"\n Create by split datasets inheriting torch.utils.data.Datasets.\n\n # build() can be dataset-specific. Overwrite to customize.\n \"\"\"\n self.build_processors()\n\n build_info = self.config.build_info\n\n ann_info = build_info.annotations\n vis_info = build_info.get(self.data_type)\n\n datasets = dict()\n for split in ann_info.keys():\n if split not in [\"train\", \"val\", \"test\"]:\n continue\n\n is_train = split == \"train\"\n\n # processors\n vis_processor = (\n self.vis_processors[\"train\"]\n if is_train\n else self.vis_processors[\"eval\"]\n )\n text_processor = (\n self.text_processors[\"train\"]\n if is_train\n else self.text_processors[\"eval\"]\n )\n\n # annotation path\n ann_paths = ann_info.get(split).storage\n if isinstance(ann_paths, str):\n ann_paths = [ann_paths]\n\n abs_ann_paths = []\n for ann_path in ann_paths:\n if not os.path.isabs(ann_path):\n ann_path = utils.get_cache_path(ann_path)\n abs_ann_paths.append(ann_path)\n ann_paths = abs_ann_paths\n\n # visual data storage path\n vis_path = os.path.join(vis_info.storage, split)\n\n if not os.path.isabs(vis_path):\n # vis_path = os.path.join(utils.get_cache_path(), vis_path)\n vis_path = utils.get_cache_path(vis_path)\n\n if not os.path.exists(vis_path):\n warnings.warn(\"storage path {} does not exist.\".format(vis_path))\n\n # create datasets\n dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls\n datasets[split] = dataset_cls(\n vis_processor=vis_processor,\n text_processor=text_processor,\n ann_paths=ann_paths,\n vis_root=vis_path,\n )\n\n return datasets"
},
{
"identifier": "LaionDataset",
"path": "minigpt4/datasets/datasets/laion_dataset.py",
"snippet": "class LaionDataset(BaseDataset):\n def __init__(self, vis_processor, text_processor, location):\n super().__init__(vis_processor=vis_processor, text_processor=text_processor)\n\n self.inner_dataset = wds.DataPipeline(\n wds.ResampledShards(location),\n wds.tarfile_to_samples(handler=wds.warn_and_continue),\n wds.shuffle(1000, handler=wds.warn_and_continue),\n wds.decode(\"pilrgb\", handler=wds.warn_and_continue),\n wds.to_tuple(\"jpg\", \"json\", handler=wds.warn_and_continue),\n wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),\n wds.map(self.to_dict, handler=wds.warn_and_continue),\n )\n\n def to_dict(self, sample):\n return {\n \"image\": sample[0],\n \"answer\": self.text_processor(sample[1][\"caption\"]),\n }"
},
{
"identifier": "CCSBUDataset",
"path": "minigpt4/datasets/datasets/cc_sbu_dataset.py",
"snippet": "class CCSBUDataset(BaseDataset):\n def __init__(self, vis_processor, text_processor, location):\n super().__init__(vis_processor=vis_processor, text_processor=text_processor)\n\n self.inner_dataset = wds.DataPipeline(\n wds.ResampledShards(location),\n wds.tarfile_to_samples(handler=wds.warn_and_continue),\n wds.shuffle(1000, handler=wds.warn_and_continue),\n wds.decode(\"pilrgb\", handler=wds.warn_and_continue),\n wds.to_tuple(\"jpg\", \"json\", handler=wds.warn_and_continue),\n wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),\n wds.map(self.to_dict, handler=wds.warn_and_continue),\n )\n\n def to_dict(self, sample):\n return {\n \"image\": sample[0],\n \"answer\": self.text_processor(sample[1][\"caption\"]),\n }"
},
{
"identifier": "CCSBUAlignDataset",
"path": "minigpt4/datasets/datasets/cc_sbu_dataset.py",
"snippet": "class CCSBUAlignDataset(CaptionDataset):\n\n def __getitem__(self, index):\n\n # TODO this assumes image input, not general enough\n ann = self.annotation[index]\n\n img_file = '{}.jpg'.format(ann[\"image_id\"])\n image_path = os.path.join(self.vis_root, img_file)\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n caption = ann[\"caption\"]\n\n return {\n \"image\": image,\n \"answer\": caption,\n \"image_id\": self.img_ids[ann[\"image_id\"]],\n }"
},
{
"identifier": "TextCapDataset",
"path": "minigpt4/datasets/datasets/text_caps.py",
"snippet": "class TextCapDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n 'Briefly describe this image.',\n 'Provide a concise depiction of this image.',\n 'Present a short description of this image.',\n 'Summarize this image in a few words.',\n 'A short image caption:',\n 'A short image description:',\n 'A photo of ',\n 'An image that shows ',\n 'Write a short description for the image. ',\n 'Write a description for the photo.',\n 'Provide a description of what is presented in the photo.',\n 'Briefly describe the content of the image.',\n 'Can you briefly explain what you see in the image?',\n 'Could you use a few words to describe what you perceive in the photo?',\n 'Please provide a short depiction of the picture.',\n 'Using language, provide a short account of the image.',\n 'Use a few words to illustrate what is happening in the picture.',\n ]\n \n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n\n def __len__(self):\n return len(self.ann[\"data\"])\n\n\n def __getitem__(self, index):\n info = self.ann[\"data\"][index]\n\n image_file = '{}.jpg'.format(info['image_id'])\n\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n caption = info[\"caption_str\"]\n caption = self.text_processor(caption)\n instruction = \"<Img><ImageHere></Img> [caption] {} \".format(random.choice(self.instruction_pool))\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": caption,\n }"
},
{
"identifier": "LlavaDetailDataset",
"path": "minigpt4/datasets/datasets/llava_dataset.py",
"snippet": "class LlavaDetailDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['conversations'][1]['value']\n instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n \n instruction = '<Img><ImageHere></Img> {} '.format(self.text_processor(instruction))\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['id'],\n }"
},
{
"identifier": "LlavaReasonDataset",
"path": "minigpt4/datasets/datasets/llava_dataset.py",
"snippet": "class LlavaReasonDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['conversations'][1]['value']\n instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n\n instruction = '<Img><ImageHere></Img> {} '.format(self.text_processor(instruction))\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['id'],\n }"
},
{
"identifier": "LlavaConversationDataset",
"path": "minigpt4/datasets/datasets/llava_dataset.py",
"snippet": "class LlavaConversationDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.ann=[]\n\n \n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n self.connect_sym = \"!@#\"\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)\n\n questions = [first_instruction]\n answers = []\n\n for i, item in enumerate(info[\"conversations\"][1:]):\n if i % 2 ==0: # assistant\n assistant_answer = item[\"value\"]\n answers.append(assistant_answer)\n else:\n human_instruction = item[\"value\"]+\" \"\n questions.append(human_instruction)\n\n questions = self.connect_sym.join(questions)\n answers = self.connect_sym.join(answers)\n\n\n return {\n \"image\": image,\n \"conv_q\": questions,\n 'conv_a': answers,\n \"image_id\": info['id'],\n \"connect_sym\": self.connect_sym\n }"
},
{
"identifier": "UnnaturalDataset",
"path": "minigpt4/datasets/datasets/unnatural_instruction.py",
"snippet": "class UnnaturalDataset(Dataset):\n def __init__(self, text_processor, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index][\"instances\"][0]\n instruction = info[\"instruction_with_input\"]\n constraints = info[\"constraints\"]\n answer = info[\"output\"]\n if constraints != None:\n instruction = instruction+\" \"+constraints\n\n return {\n \"instruction_input\": self.text_processor(instruction),\n \"answer\": self.text_processor(answer),\n }"
},
{
"identifier": "MultiTaskConversationDataset",
"path": "minigpt4/datasets/datasets/multitask_conversation.py",
"snippet": "class MultiTaskConversationDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n self.connect_sym = \"!@#\"\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)\n\n questions = [first_instruction]\n answers = []\n\n for i, item in enumerate(info[\"conversations\"][1:]):\n if i % 2 ==0: # assistant\n assistant_answer = item[\"value\"]\n answers.append(assistant_answer)\n else:\n human_instruction = item[\"value\"]+\" \"\n questions.append(human_instruction)\n\n questions = self.connect_sym.join(questions)\n answers = self.connect_sym.join(answers)\n\n\n return {\n \"image\": image,\n \"conv_q\": questions,\n 'conv_a': answers,\n \"image_id\": info['id'],\n \"connect_sym\": self.connect_sym\n }"
},
{
"identifier": "GroundedDetailDataset",
"path": "minigpt4/datasets/datasets/flickr.py",
"snippet": "class GroundedDetailDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[grounding] please describe this image in details',\n '[grounding] describe this image as detailed as possible',\n '[grounding] summarize this image in details',\n '[grounding] give a thorough description of what you see in this image',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n # image_file = 'COCO_train2014_{}.jpg'.format(info['image_id'])\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['grounded_caption']\n instruction = random.choice(self.instruction_pool)\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }"
},
{
"identifier": "CaptionToObjectDataset",
"path": "minigpt4/datasets/datasets/flickr.py",
"snippet": "class CaptionToObjectDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[detection] {}',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n input = info[\"caption\"]\n answer = info[\"output\"]\n\n instruction = random.choice(self.instruction_pool).format(input)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n print(\"CaptionToObject instruction\", instruction)\n print(\"CaptionToObject answer\", answer)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }"
},
{
"identifier": "PhraseToObjectDataset",
"path": "minigpt4/datasets/datasets/flickr.py",
"snippet": "class PhraseToObjectDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[detection] {}',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n input = info[\"phrase\"]\n answer = \"<p>\"+input+\"</p> \"+info[\"bbox\"]\n instruction = random.choice(self.instruction_pool).format(input)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n print(\"PhraseToObject instruction\", instruction)\n print(\"PhraseToObject answer\", answer)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }"
},
{
"identifier": "ReferVisualGenomeDataset",
"path": "minigpt4/datasets/datasets/vg_dataset.py",
"snippet": "class ReferVisualGenomeDataset(Dataset):\n def __init__(self, vis_processor, text_processor, data_dir):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.data_dir = data_dir\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n all_regions = local.get_all_region_descriptions(self.data_dir)\n all_regions = [region for regions in all_regions for region in regions]\n\n # follow OFA practice, only regions smaller than 16384 pixels are used for refer\n self.regions = [region for region in all_regions if region.width * region.height < 16384]\n\n\n self.instruction_pool = [\n \"[refer] {}\",\n \"[refer] give me the location of {}\",\n \"[refer] where is {} ?\",\n \"[refer] from this image, tell me the location of {}\",\n \"[refer] the location of {} is\",\n \"[refer] could you tell me the location for {} ?\",\n \"[refer] where can I locate the {} ?\",\n ]\n\n\n def __len__(self):\n return len(self.regions)\n\n def preprocess(self, index):\n region = self.regions[index]\n image_file = region.image.url.split('/')[-2:]\n image_path = os.path.join(self.data_dir, *image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image_orig_size = image.size\n image = self.vis_processor(image)\n image_new_size = [100,100]\n\n sample_sentence = region.phrase\n refer_sentence = self.text_processor(sample_sentence)\n\n bbox = [region.x, region.y, region.width, region.height]\n\n bbox = [\n bbox[0] / image_orig_size[0] * image_new_size[0],\n bbox[1] / image_orig_size[1] * image_new_size[1],\n (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],\n (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]\n ]\n bbox = [int(x) for x in bbox]\n bbox = \"{{<{}><{}><{}><{}>}}\".format(*bbox)\n return {\n \"image\": image,\n \"refer_sentence\": refer_sentence,\n \"bbox\": bbox,\n \"image_id\": region.image.id,\n }\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": data['bbox'],\n \"image_id\": data['image_id'],\n }"
},
{
"identifier": "ReferCOCODataset",
"path": "minigpt4/datasets/datasets/coco_dataset.py",
"snippet": "class ReferCOCODataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path, dataset='refcoco', splitBy='unc'):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.refer = REFER(ann_path, vis_root, dataset, splitBy)\n self.ref_ids = self.refer.getRefIds(split=\"train\")\n\n self.instruction_pool = [\n \"[refer] {}\",\n \"[refer] give me the location of {}\",\n \"[refer] where is {} ?\",\n \"[refer] from this image, tell me the location of {}\",\n \"[refer] the location of {} is\",\n \"[refer] could you tell me the location for {} ?\",\n \"[refer] where can I locate the {} ?\",\n ]\n\n\n def __len__(self):\n return len(self.ref_ids)\n\n def preprocess(self, index):\n ref_id = self.ref_ids[index]\n ref = self.refer.loadRefs(ref_id)[0]\n\n image_file = 'COCO_train2014_{:0>12}.jpg'.format(ref[\"image_id\"])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image_orig_size = image.size\n image = self.vis_processor(image)\n image_new_size = [image.shape[1], image.shape[2]]\n\n image_new_size = [100,100]\n\n sample_sentence = random.choice(ref['sentences'])['raw']\n refer_sentence = self.text_processor(sample_sentence)\n\n\n bbox = self.refer.getRefBox(ref['ref_id'])\n bbox = [\n bbox[0] / image_orig_size[0] * image_new_size[0],\n bbox[1] / image_orig_size[1] * image_new_size[1],\n (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],\n (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]\n ]\n bbox = [int(x) for x in bbox]\n bbox = \"{{<{}><{}><{}><{}>}}\".format(*bbox)\n return {\n \"image\": image,\n \"refer_sentence\": refer_sentence,\n \"bbox\": bbox,\n \"image_id\": ref['image_id'],\n }\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": data['bbox'],\n \"image_id\": data['image_id'],\n }"
},
{
"identifier": "InvReferCOCODataset",
"path": "minigpt4/datasets/datasets/coco_dataset.py",
"snippet": "class InvReferCOCODataset(ReferCOCODataset):\n def __init__(self, *args, **kwargs):\n super(InvReferCOCODataset, self).__init__(*args, **kwargs)\n\n self.instruction_pool = [\n \"[identify] {}\",\n \"[identify] what object is in this location {}\",\n \"[identify] identify the object present at this location {}\",\n \"[identify] what is it in {}\",\n \"[identify] describe this object in {}\",\n \"[identify] this {} is\",\n \"[identify] the object in {} is\",\n ]\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n\n instruction = random.choice(self.instruction_pool).format(data['bbox'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n \n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": self.text_processor(data['refer_sentence']),\n \"image_id\": data['image_id'],\n }"
},
{
"identifier": "GQADataset",
"path": "minigpt4/datasets/datasets/gqa_datasets.py",
"snippet": "class GQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n def __getitem__(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n\n instruction = random.choice(self.instruction_pool).format(question)\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n answers = self.text_processor(ann[\"answer\"])\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answers,\n }"
},
{
"identifier": "AOKVQADataset",
"path": "minigpt4/datasets/datasets/aok_vqa_datasets.py",
"snippet": "class AOKVQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n exist_annotation = []\n for ann in self.annotation:\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n if os.path.exists(image_path):\n exist_annotation.append(ann)\n self.annotation = exist_annotation\n\n def get_data(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n\n answer_key = \"direct_answers\"\n\n answer_weight = {}\n for answer in ann[answer_key]:\n if answer in answer_weight.keys():\n answer_weight[answer] += 1 / len(ann[answer_key])\n else:\n answer_weight[answer] = 1 / len(ann[answer_key])\n\n answers = list(answer_weight.keys())\n weights = list(answer_weight.values())\n\n answer = random.choices(answers, weights=weights, k=1)[0] # random sample an answer according to weights\n\n return {\n \"image\": image,\n \"question\": question,\n \"answer\": answer,\n }\n\n def __getitem__(self, index):\n data = self.get_data(index)\n question = self.text_processor(data[\"question\"])\n instruction = random.choice(self.instruction_pool).format(question)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n answer = self.text_processor(data['answer'])\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": answer,\n }"
},
{
"identifier": "COCOVQADataset",
"path": "minigpt4/datasets/datasets/coco_vqa_datasets.py",
"snippet": "class COCOVQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n exist_annotation = []\n for ann in self.annotation:\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n if os.path.exists(image_path):\n exist_annotation.append(ann)\n self.annotation = exist_annotation\n\n\n def get_data(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n question_id = ann[\"question_id\"]\n\n answer_weight = {}\n for answer in ann[\"answer\"]:\n if answer in answer_weight.keys():\n answer_weight[answer] += 1 / len(ann[\"answer\"])\n else:\n answer_weight[answer] = 1 / len(ann[\"answer\"])\n\n answers = list(answer_weight.keys())\n weights = list(answer_weight.values())\n\n answer = random.choices(answers, weights=weights, k=1)[0] # random sample an answer according to weights\n\n\n return {\n \"image\": image,\n \"question\": question,\n \"question_id\": question_id,\n \"answer\": answer,\n }\n\n def __getitem__(self, index):\n data = self.get_data(index)\n instruction = random.choice(self.instruction_pool).format(data['question'])\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"question_id\": data[\"question_id\"],\n \"instruction_input\": instruction,\n \"answer\": self.text_processor(data['answer']),\n }"
},
{
"identifier": "OCRVQADataset",
"path": "minigpt4/datasets/datasets/ocrvqa_dataset.py",
"snippet": "class OCRVQADataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n self.data = self.create_data(ann_path)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n def create_data(self, ann_path):\n processed_data = []\n with open(ann_path, 'r') as f:\n data = json.load(f)\n for k in data.keys():\n if data[k]['split'] != 1: continue # 1 for training, 2 for validation, 3 for test\n ext = os.path.splitext(data[k]['imageURL'])[1]\n imageFile = k + ext\n assert len(data[k]['questions']) == len(data[k]['answers'])\n for q, a in zip(data[k]['questions'], data[k]['answers']):\n processed_data.append(\n {'question': q,\n 'answer': a,\n 'image_path': imageFile,\n 'image_id': k,\n 'title': data[k]['title'],\n 'genre': data[k]['genre'],\n }\n )\n return processed_data\n\n def __len__(self):\n return len(self.data)"
},
{
"identifier": "COCOCapDataset",
"path": "minigpt4/datasets/datasets/coco_caption.py",
"snippet": "class COCOCapEvalDataset(CaptionEvalDataset):\nclass NoCapsEvalDataset(CaptionEvalDataset):\nclass RefCOCOEvalData(torch.utils.data.Dataset):\nclass EvalCaptionData(torch.utils.data.Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n def __getitem__(self, index):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n def __getitem__(self, index):\n def __init__(self, loaded_data, vis_processor, root_path):\n def __new__(cls, *args, **kwargs):\n def __len__(self):\n def __getitem__(self, idx):\n def __init__(self, loaded_data, vis_processor, root_path):\n def __len__(self):\n def __getitem__(self, idx):"
}
] | import os
import logging
import warnings
from minigpt4.common.registry import registry
from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from minigpt4.datasets.datasets.laion_dataset import LaionDataset
from minigpt4.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset
from minigpt4.datasets.datasets.text_caps import TextCapDataset
from minigpt4.datasets.datasets.llava_dataset import LlavaDetailDataset, LlavaReasonDataset, LlavaConversationDataset
from minigpt4.datasets.datasets.unnatural_instruction import UnnaturalDataset
from minigpt4.datasets.datasets.multitask_conversation import MultiTaskConversationDataset
from minigpt4.datasets.datasets.flickr import GroundedDetailDataset,CaptionToObjectDataset,PhraseToObjectDataset
from minigpt4.datasets.datasets.vg_dataset import ReferVisualGenomeDataset
from minigpt4.datasets.datasets.coco_dataset import ReferCOCODataset, InvReferCOCODataset
from minigpt4.datasets.datasets.gqa_datasets import GQADataset
from minigpt4.datasets.datasets.aok_vqa_datasets import AOKVQADataset
from minigpt4.datasets.datasets.coco_vqa_datasets import COCOVQADataset
from minigpt4.datasets.datasets.ocrvqa_dataset import OCRVQADataset
from minigpt4.datasets.datasets.coco_caption import COCOCapDataset | 10,369 |
@registry.register_builder("multitask_conversation")
class MultitaskConversationBuilder(BaseDatasetBuilder):
train_dataset_cls = MultiTaskConversationDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/multitask_conversation/default.yaml",
}
def build_datasets(self):
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
self.build_processors()
build_info = self.config.build_info
datasets = dict()
# create datasets
dataset_cls = self.train_dataset_cls
datasets['train'] = dataset_cls(
vis_processor=self.vis_processors["train"],
text_processor=self.text_processors["train"],
ann_path=build_info.ann_path,
vis_root=build_info.image_path,
)
return datasets
@registry.register_builder("unnatural_instruction")
class UnnaturalInstructionBuilder(BaseDatasetBuilder):
train_dataset_cls = UnnaturalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/nlp/unnatural_instruction.yaml",
}
def build_datasets(self):
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
self.build_processors()
build_info = self.config.build_info
datasets = dict()
# create datasets
dataset_cls = self.train_dataset_cls
datasets['train'] = dataset_cls(
text_processor=self.text_processors["train"],
ann_path=build_info.ann_path,
)
return datasets
@registry.register_builder("llava_detail")
class LlavaDetailBuilder(BaseDatasetBuilder):
|
@registry.register_builder("multitask_conversation")
class MultitaskConversationBuilder(BaseDatasetBuilder):
train_dataset_cls = MultiTaskConversationDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/multitask_conversation/default.yaml",
}
def build_datasets(self):
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
self.build_processors()
build_info = self.config.build_info
datasets = dict()
# create datasets
dataset_cls = self.train_dataset_cls
datasets['train'] = dataset_cls(
vis_processor=self.vis_processors["train"],
text_processor=self.text_processors["train"],
ann_path=build_info.ann_path,
vis_root=build_info.image_path,
)
return datasets
@registry.register_builder("unnatural_instruction")
class UnnaturalInstructionBuilder(BaseDatasetBuilder):
train_dataset_cls = UnnaturalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/nlp/unnatural_instruction.yaml",
}
def build_datasets(self):
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
self.build_processors()
build_info = self.config.build_info
datasets = dict()
# create datasets
dataset_cls = self.train_dataset_cls
datasets['train'] = dataset_cls(
text_processor=self.text_processors["train"],
ann_path=build_info.ann_path,
)
return datasets
@registry.register_builder("llava_detail")
class LlavaDetailBuilder(BaseDatasetBuilder): | train_dataset_cls = LlavaDetailDataset | 6 | 2023-12-28 05:47:18+00:00 | 12k |
ali-vilab/dreamtalk | inference_for_demo_video.py | [
{
"identifier": "get_cfg_defaults",
"path": "configs/default.py",
"snippet": "def get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n return _C.clone()"
},
{
"identifier": "DiffusionNet",
"path": "core/networks/diffusion_net.py",
"snippet": "class DiffusionNet(Module):\n def __init__(self, cfg, net, var_sched: VarianceSchedule):\n super().__init__()\n self.cfg = cfg\n self.net = net\n self.var_sched = var_sched\n self.face3d_latent_type = self.cfg.TRAIN.FACE3D_LATENT.TYPE\n self.predict_what = self.cfg.DIFFUSION.PREDICT_WHAT\n\n if self.cfg.CF_GUIDANCE.TRAINING:\n null_style_clip = torch.zeros(\n self.cfg.DATASET.STYLE_MAX_LEN, self.cfg.DATASET.FACE3D_DIM\n )\n self.register_buffer(\"null_style_clip\", null_style_clip)\n\n null_pad_mask = torch.tensor([False] * self.cfg.DATASET.STYLE_MAX_LEN)\n self.register_buffer(\"null_pad_mask\", null_pad_mask)\n\n def _face3d_to_latent(self, face3d):\n latent = None\n if self.face3d_latent_type == \"face3d\":\n latent = face3d\n elif self.face3d_latent_type == \"normalized_face3d\":\n latent = face3d_raw_to_norm(\n face3d, exp_min=self.exp_min, exp_max=self.exp_max\n )\n else:\n raise ValueError(f\"Invalid face3d latent type: {self.face3d_latent_type}\")\n return latent\n\n def _latent_to_face3d(self, latent):\n face3d = None\n if self.face3d_latent_type == \"face3d\":\n face3d = latent\n elif self.face3d_latent_type == \"normalized_face3d\":\n latent = torch.clamp(latent, min=-1, max=1)\n face3d = face3d_norm_to_raw(\n latent, exp_min=self.exp_min, exp_max=self.exp_max\n )\n else:\n raise ValueError(f\"Invalid face3d latent type: {self.face3d_latent_type}\")\n return face3d\n\n def ddim_sample(\n self,\n audio,\n style_clip,\n style_pad_mask,\n output_dim,\n flexibility=0.0,\n ret_traj=False,\n use_cf_guidance=False,\n cfg_scale=2.0,\n ddim_num_step=50,\n ready_style_code=None,\n ):\n \"\"\"\n\n Args:\n audio (_type_): (B, L, W) or (B, L, W, C)\n style_clip (_type_): (B, L_clipmax, C_face3d)\n style_pad_mask : (B, L_clipmax)\n pose_dim (_type_): int\n flexibility (float, optional): _description_. Defaults to 0.0.\n ret_traj (bool, optional): _description_. 
Defaults to False.\n\n\n Returns:\n _type_: (B, L, C_face)\n \"\"\"\n if self.predict_what != \"x0\":\n raise NotImplementedError(self.predict_what)\n\n if ready_style_code is not None and use_cf_guidance:\n raise NotImplementedError(\"not implement cfg for ready style code\")\n\n c = self.var_sched.num_steps // ddim_num_step\n time_steps = torch.tensor(\n np.asarray(list(range(0, self.var_sched.num_steps, c))) + 1\n )\n assert len(time_steps) == ddim_num_step\n prev_time_steps = torch.cat((torch.tensor([0]), time_steps[:-1]))\n\n batch_size, output_len = audio.shape[:2]\n # batch_size = context.size(0)\n context = {\n \"audio\": audio,\n \"style_clip\": style_clip,\n \"style_pad_mask\": style_pad_mask,\n \"ready_style_code\": ready_style_code,\n }\n if use_cf_guidance:\n uncond_style_clip = self.null_style_clip.unsqueeze(0).repeat(\n batch_size, 1, 1\n )\n uncond_pad_mask = self.null_pad_mask.unsqueeze(0).repeat(batch_size, 1)\n\n context_double = {\n \"audio\": torch.cat([audio] * 2, dim=0),\n \"style_clip\": torch.cat([style_clip, uncond_style_clip], dim=0),\n \"style_pad_mask\": torch.cat([style_pad_mask, uncond_pad_mask], dim=0),\n \"ready_style_code\": None\n if ready_style_code is None\n else torch.cat(\n [\n ready_style_code,\n self.net.style_encoder(uncond_style_clip, uncond_pad_mask),\n ],\n dim=0,\n ),\n }\n\n x_t = torch.randn([batch_size, output_len, output_dim]).to(audio.device)\n\n for idx in list(range(ddim_num_step))[::-1]:\n t = time_steps[idx]\n t_prev = prev_time_steps[idx]\n ddim_alpha = self.var_sched.alpha_bars[t]\n ddim_alpha_prev = self.var_sched.alpha_bars[t_prev]\n\n t_tensor = torch.tensor([t] * batch_size).to(audio.device).float()\n if use_cf_guidance:\n x_t_double = torch.cat([x_t] * 2, dim=0)\n t_tensor_double = torch.cat([t_tensor] * 2, dim=0)\n cond_output, uncond_output = self.net(\n x_t_double, t=t_tensor_double, **context_double\n ).chunk(2)\n diff_output = uncond_output + cfg_scale * (cond_output - uncond_output)\n else:\n diff_output = self.net(x_t, t=t_tensor, **context)\n\n pred_x0 = diff_output\n eps = (x_t - torch.sqrt(ddim_alpha) * pred_x0) / torch.sqrt(1 - ddim_alpha)\n c1 = torch.sqrt(ddim_alpha_prev)\n c2 = torch.sqrt(1 - ddim_alpha_prev)\n\n x_t = c1 * pred_x0 + c2 * eps\n\n latent_output = x_t\n face3d_output = self._latent_to_face3d(latent_output)\n return face3d_output\n\n def sample(\n self,\n audio,\n style_clip,\n style_pad_mask,\n output_dim,\n flexibility=0.0,\n ret_traj=False,\n use_cf_guidance=False,\n cfg_scale=2.0,\n sample_method=\"ddpm\",\n ddim_num_step=50,\n ready_style_code=None,\n ):\n # sample_method = kwargs[\"sample_method\"]\n if sample_method == \"ddpm\":\n if ready_style_code is not None:\n raise NotImplementedError(\"ready style code in ddpm\")\n return self.ddpm_sample(\n audio,\n style_clip,\n style_pad_mask,\n output_dim,\n flexibility=flexibility,\n ret_traj=ret_traj,\n use_cf_guidance=use_cf_guidance,\n cfg_scale=cfg_scale,\n )\n elif sample_method == \"ddim\":\n return self.ddim_sample(\n audio,\n style_clip,\n style_pad_mask,\n output_dim,\n flexibility=flexibility,\n ret_traj=ret_traj,\n use_cf_guidance=use_cf_guidance,\n cfg_scale=cfg_scale,\n ddim_num_step=ddim_num_step,\n ready_style_code=ready_style_code,\n )\n\n def ddpm_sample(\n self,\n audio,\n style_clip,\n style_pad_mask,\n output_dim,\n flexibility=0.0,\n ret_traj=False,\n use_cf_guidance=False,\n cfg_scale=2.0,\n ):\n \"\"\"\n\n Args:\n audio (_type_): (B, L, W) or (B, L, W, C)\n style_clip (_type_): (B, L_clipmax, C_face3d)\n style_pad_mask : (B, 
L_clipmax)\n pose_dim (_type_): int\n flexibility (float, optional): _description_. Defaults to 0.0.\n ret_traj (bool, optional): _description_. Defaults to False.\n\n\n Returns:\n _type_: (B, L, C_face)\n \"\"\"\n batch_size, output_len = audio.shape[:2]\n # batch_size = context.size(0)\n context = {\n \"audio\": audio,\n \"style_clip\": style_clip,\n \"style_pad_mask\": style_pad_mask,\n }\n if use_cf_guidance:\n uncond_style_clip = self.null_style_clip.unsqueeze(0).repeat(\n batch_size, 1, 1\n )\n uncond_pad_mask = self.null_pad_mask.unsqueeze(0).repeat(batch_size, 1)\n context_double = {\n \"audio\": torch.cat([audio] * 2, dim=0),\n \"style_clip\": torch.cat([style_clip, uncond_style_clip], dim=0),\n \"style_pad_mask\": torch.cat([style_pad_mask, uncond_pad_mask], dim=0),\n }\n\n x_T = torch.randn([batch_size, output_len, output_dim]).to(audio.device)\n traj = {self.var_sched.num_steps: x_T}\n for t in range(self.var_sched.num_steps, 0, -1):\n alpha = self.var_sched.alphas[t]\n alpha_bar = self.var_sched.alpha_bars[t]\n alpha_bar_prev = self.var_sched.alpha_bars[t - 1]\n sigma = self.var_sched.get_sigmas(t, flexibility)\n\n z = torch.randn_like(x_T) if t > 1 else torch.zeros_like(x_T)\n x_t = traj[t]\n t_tensor = torch.tensor([t] * batch_size).to(audio.device).float()\n if use_cf_guidance:\n x_t_double = torch.cat([x_t] * 2, dim=0)\n t_tensor_double = torch.cat([t_tensor] * 2, dim=0)\n cond_output, uncond_output = self.net(\n x_t_double, t=t_tensor_double, **context_double\n ).chunk(2)\n diff_output = uncond_output + cfg_scale * (cond_output - uncond_output)\n else:\n diff_output = self.net(x_t, t=t_tensor, **context)\n\n if self.predict_what == \"noise\":\n c0 = 1.0 / torch.sqrt(alpha)\n c1 = (1 - alpha) / torch.sqrt(1 - alpha_bar)\n x_next = c0 * (x_t - c1 * diff_output) + sigma * z\n elif self.predict_what == \"x0\":\n d0 = torch.sqrt(alpha) * (1 - alpha_bar_prev) / (1 - alpha_bar)\n d1 = torch.sqrt(alpha_bar_prev) * (1 - alpha) / (1 - alpha_bar)\n x_next = d0 * x_t + d1 * diff_output + sigma * z\n traj[t - 1] = x_next.detach()\n traj[t] = traj[t].cpu()\n if not ret_traj:\n del traj[t]\n\n if ret_traj:\n raise NotImplementedError\n return traj\n else:\n latent_output = traj[0]\n face3d_output = self._latent_to_face3d(latent_output)\n return face3d_output"
},
{
"identifier": "NoisePredictor",
"path": "core/networks/diffusion_util.py",
"snippet": "class NoisePredictor(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n\n content_encoder_class = get_network(cfg.CONTENT_ENCODER_TYPE)\n self.content_encoder = content_encoder_class(**cfg.CONTENT_ENCODER)\n\n style_encoder_class = get_network(cfg.STYLE_ENCODER_TYPE)\n cfg.defrost()\n cfg.STYLE_ENCODER.input_dim = cfg.DATASET.FACE3D_DIM\n cfg.freeze()\n self.style_encoder = style_encoder_class(**cfg.STYLE_ENCODER)\n\n decoder_class = get_network(cfg.DECODER_TYPE)\n cfg.defrost()\n cfg.DECODER.output_dim = cfg.DATASET.FACE3D_DIM\n cfg.freeze()\n self.decoder = decoder_class(**cfg.DECODER)\n\n self.content_xt_to_decoder_input_wo_time = nn.Sequential(\n nn.Linear(cfg.D_MODEL + cfg.DATASET.FACE3D_DIM, cfg.D_MODEL),\n nn.ReLU(),\n nn.Linear(cfg.D_MODEL, cfg.D_MODEL),\n nn.ReLU(),\n nn.Linear(cfg.D_MODEL, cfg.D_MODEL),\n )\n\n self.time_sinusoidal_dim = cfg.D_MODEL\n self.time_embed_net = nn.Sequential(\n nn.Linear(cfg.D_MODEL, cfg.D_MODEL),\n nn.SiLU(),\n nn.Linear(cfg.D_MODEL, cfg.D_MODEL),\n )\n\n def forward(self, x_t, t, audio, style_clip, style_pad_mask, ready_style_code=None):\n \"\"\"_summary_\n\n Args:\n x_t (_type_): (B, L, C_face)\n t (_type_): (B,) dtype:float32\n audio (_type_): (B, L, W)\n style_clip (_type_): (B, L_clipmax, C_face3d)\n style_pad_mask : (B, L_clipmax)\n ready_style_code: (B, C_model)\n Returns:\n e_theta : (B, L, C_face)\n \"\"\"\n W = audio.shape[2]\n content = self.content_encoder(audio)\n # (B, L, W, C_model)\n x_t_expand = x_t.unsqueeze(2).repeat(1, 1, W, 1)\n # (B, L, C_face) -> (B, L, W, C_face)\n content_xt_concat = torch.cat((content, x_t_expand), dim=3)\n # (B, L, W, C_model+C_face)\n decoder_input_without_time = self.content_xt_to_decoder_input_wo_time(\n content_xt_concat\n )\n # (B, L, W, C_model)\n\n time_sinusoidal = sinusoidal_embedding(t, self.time_sinusoidal_dim)\n # (B, C_embed)\n time_embedding = self.time_embed_net(time_sinusoidal)\n # (B, C_model)\n B, C = time_embedding.shape\n time_embed_expand = time_embedding.view(B, 1, 1, C)\n decoder_input = decoder_input_without_time + time_embed_expand\n # (B, L, W, C_model)\n\n if ready_style_code is not None:\n style_code = ready_style_code\n else:\n style_code = self.style_encoder(style_clip, style_pad_mask)\n # (B, C_model)\n\n e_theta = self.decoder(decoder_input, style_code)\n # (B, L, C_face)\n return e_theta"
},
{
"identifier": "VarianceSchedule",
"path": "core/networks/diffusion_util.py",
"snippet": "class VarianceSchedule(Module):\n def __init__(self, num_steps, beta_1, beta_T, mode=\"linear\"):\n super().__init__()\n assert mode in (\"linear\",)\n self.num_steps = num_steps\n self.beta_1 = beta_1\n self.beta_T = beta_T\n self.mode = mode\n\n if mode == \"linear\":\n betas = torch.linspace(beta_1, beta_T, steps=num_steps)\n\n betas = torch.cat([torch.zeros([1]), betas], dim=0) # Padding\n\n alphas = 1 - betas\n log_alphas = torch.log(alphas)\n for i in range(1, log_alphas.size(0)): # 1 to T\n log_alphas[i] += log_alphas[i - 1]\n alpha_bars = log_alphas.exp()\n\n sigmas_flex = torch.sqrt(betas)\n sigmas_inflex = torch.zeros_like(sigmas_flex)\n for i in range(1, sigmas_flex.size(0)):\n sigmas_inflex[i] = ((1 - alpha_bars[i - 1]) / (1 - alpha_bars[i])) * betas[\n i\n ]\n sigmas_inflex = torch.sqrt(sigmas_inflex)\n\n self.register_buffer(\"betas\", betas)\n self.register_buffer(\"alphas\", alphas)\n self.register_buffer(\"alpha_bars\", alpha_bars)\n self.register_buffer(\"sigmas_flex\", sigmas_flex)\n self.register_buffer(\"sigmas_inflex\", sigmas_inflex)\n\n def uniform_sample_t(self, batch_size):\n ts = np.random.choice(np.arange(1, self.num_steps + 1), batch_size)\n return ts.tolist()\n\n def get_sigmas(self, t, flexibility):\n assert 0 <= flexibility and flexibility <= 1\n sigmas = self.sigmas_flex[t] * flexibility + self.sigmas_inflex[t] * (\n 1 - flexibility\n )\n return sigmas"
},
{
"identifier": "crop_src_image",
"path": "core/utils.py",
"snippet": "def crop_src_image(src_img, save_img, increase_ratio, detector=None):\n if detector is None:\n detector = dlib.get_frontal_face_detector()\n\n img = cv2.imread(src_img)\n faces = detector(img, 0)\n h, width, _ = img.shape\n if len(faces) > 0:\n bbox = [faces[0].left(), faces[0].top(), faces[0].right(), faces[0].bottom()]\n l = bbox[3] - bbox[1]\n bbox[1] = bbox[1] - l * 0.1\n bbox[3] = bbox[3] - l * 0.1\n bbox[1] = max(0, bbox[1])\n bbox[3] = min(h, bbox[3])\n bbox = compute_aspect_preserved_bbox(\n tuple(bbox), increase_ratio, img.shape[0], img.shape[1]\n )\n img = img[bbox[1] : bbox[3], bbox[0] : bbox[2]]\n img = cv2.resize(img, (256, 256))\n cv2.imwrite(save_img, img)\n else:\n raise ValueError(\"No face detected in the input image\")\n # img = cv2.resize(img, (256, 256))\n # cv2.imwrite(save_img, img)"
},
{
"identifier": "get_pose_params",
"path": "core/utils.py",
"snippet": "def get_pose_params(mat_path):\n \"\"\"Get pose parameters from mat file\n\n Args:\n mat_path (str): path of mat file\n\n Returns:\n pose_params (numpy.ndarray): shape (L_video, 9), angle, translation, crop paramters\n \"\"\"\n mat_dict = loadmat(mat_path)\n\n np_3dmm = mat_dict[\"coeff\"]\n angles = np_3dmm[:, 224:227]\n translations = np_3dmm[:, 254:257]\n\n np_trans_params = mat_dict[\"transform_params\"]\n crop = np_trans_params[:, -3:]\n\n pose_params = np.concatenate((angles, translations, crop), axis=1)\n\n return pose_params"
},
{
"identifier": "get_video_style_clip",
"path": "core/utils.py",
"snippet": "def get_video_style_clip(\n video_name,\n video_root_dir,\n style_max_len,\n start_idx=\"random\",\n dtype=torch.float32,\n return_start_idx=False,\n):\n video_path = os.path.join(video_root_dir, video_name)\n if video_path[-3:] == \"mat\":\n face3d_all = loadmat(video_path)[\"coeff\"]\n face3d_exp = face3d_all[:, 80:144] # expression 3DMM range\n elif video_path[-3:] == \"txt\":\n face3d_exp = np.loadtxt(video_path)\n else:\n raise ValueError(\"Invalid 3DMM file extension\")\n\n face3d_exp = torch.tensor(face3d_exp, dtype=dtype)\n\n length = face3d_exp.shape[0]\n if length >= style_max_len:\n clip_num_frames = style_max_len\n if start_idx == \"random\":\n clip_start_idx = np.random.randint(low=0, high=length - clip_num_frames + 1)\n elif start_idx == \"middle\":\n clip_start_idx = (length - clip_num_frames + 1) // 2\n elif isinstance(start_idx, int):\n clip_start_idx = start_idx\n else:\n raise ValueError(f\"Invalid start_idx {start_idx}\")\n\n face3d_clip = face3d_exp[clip_start_idx : clip_start_idx + clip_num_frames]\n pad_mask = torch.tensor([False] * style_max_len)\n else:\n clip_start_idx = None\n padding = torch.zeros(style_max_len - length, face3d_exp.shape[1])\n face3d_clip = torch.cat((face3d_exp, padding), dim=0)\n pad_mask = torch.tensor([False] * length + [True] * (style_max_len - length))\n\n if return_start_idx:\n return face3d_clip, pad_mask, clip_start_idx\n else:\n return face3d_clip, pad_mask"
},
{
"identifier": "get_wav2vec_audio_window",
"path": "core/utils.py",
"snippet": "def get_wav2vec_audio_window(audio_feat, start_idx, num_frames, win_size):\n \"\"\"\n\n Args:\n audio_feat (np.ndarray): (N, 1024)\n start_idx (_type_): _description_\n num_frames (_type_): _description_\n \"\"\"\n center_idx_list = [2 * idx for idx in range(start_idx, start_idx + num_frames)]\n audio_window_list = []\n padding = np.zeros(audio_feat.shape[1], dtype=np.float32)\n for center_idx in center_idx_list:\n cur_audio_window = []\n for i in range(center_idx - win_size, center_idx + win_size + 1):\n if i < 0:\n cur_audio_window.append(padding)\n elif i >= len(audio_feat):\n cur_audio_window.append(padding)\n else:\n cur_audio_window.append(audio_feat[i])\n cur_audio_win_array = np.stack(cur_audio_window, axis=0)\n audio_window_list.append(cur_audio_win_array)\n\n audio_window_array = np.stack(audio_window_list, axis=0)\n return audio_window_array"
},
{
"identifier": "get_netG",
"path": "generators/utils.py",
"snippet": "@torch.no_grad()\ndef get_netG(checkpoint_path, device):\n import yaml\n\n from generators.face_model import FaceGenerator\n\n with open(\"generators/renderer_conf.yaml\", \"r\") as f:\n renderer_config = yaml.load(f, Loader=yaml.FullLoader)\n\n renderer = FaceGenerator(**renderer_config).to(device)\n\n checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)\n renderer.load_state_dict(checkpoint[\"net_G_ema\"], strict=False)\n\n renderer.eval()\n\n return renderer"
},
{
"identifier": "render_video",
"path": "generators/utils.py",
"snippet": "@torch.no_grad()\ndef render_video(\n net_G,\n src_img_path,\n exp_path,\n wav_path,\n output_path,\n device,\n silent=False,\n semantic_radius=13,\n fps=30,\n split_size=16,\n no_move=False,\n):\n \"\"\"\n exp: (N, 73)\n \"\"\"\n target_exp_seq = np.load(exp_path)\n if target_exp_seq.shape[1] == 257:\n exp_coeff = target_exp_seq[:, 80:144]\n angle_trans_crop = np.array(\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9370641, 126.84911, 129.03864],\n dtype=np.float32,\n )\n target_exp_seq = np.concatenate(\n [exp_coeff, angle_trans_crop[None, ...].repeat(exp_coeff.shape[0], axis=0)],\n axis=1,\n )\n # (L, 73)\n elif target_exp_seq.shape[1] == 73:\n if no_move:\n target_exp_seq[:, 64:] = np.array(\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9370641, 126.84911, 129.03864],\n dtype=np.float32,\n )\n else:\n raise NotImplementedError\n\n frame = cv2.imread(src_img_path)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n src_img_raw = Image.fromarray(frame)\n image_transform = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\n ]\n )\n src_img = image_transform(src_img_raw)\n\n target_win_exps = []\n for frame_idx in range(len(target_exp_seq)):\n win_indices = obtain_seq_index(\n frame_idx, target_exp_seq.shape[0], semantic_radius\n )\n win_exp = torch.tensor(target_exp_seq[win_indices]).permute(1, 0)\n # (73, 27)\n target_win_exps.append(win_exp)\n\n target_exp_concat = torch.stack(target_win_exps, dim=0)\n target_splited_exps = torch.split(target_exp_concat, split_size, dim=0)\n output_imgs = []\n for win_exp in target_splited_exps:\n win_exp = win_exp.to(device)\n cur_src_img = src_img.expand(win_exp.shape[0], -1, -1, -1).to(device)\n output_dict = net_G(cur_src_img, win_exp)\n output_imgs.append(output_dict[\"fake_image\"].cpu().clamp_(-1, 1))\n\n output_imgs = torch.cat(output_imgs, 0)\n transformed_imgs = ((output_imgs + 1) / 2 * 255).to(torch.uint8).permute(0, 2, 3, 1)\n\n if silent:\n torchvision.io.write_video(output_path, transformed_imgs.cpu(), fps)\n else:\n silent_video_path = f\"{output_path}-silent.mp4\"\n torchvision.io.write_video(silent_video_path, transformed_imgs.cpu(), fps)\n os.system(\n f\"ffmpeg -loglevel quiet -y -i {silent_video_path} -i {wav_path} -shortest {output_path}\"\n )\n os.remove(silent_video_path)"
}
] | import argparse
import json
import os
import shutil
import subprocess
import numpy as np
import torch
import torchaudio
from scipy.io import loadmat
from transformers import Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2Model
from configs.default import get_cfg_defaults
from core.networks.diffusion_net import DiffusionNet
from core.networks.diffusion_util import NoisePredictor, VarianceSchedule
from core.utils import (
crop_src_image,
get_pose_params,
get_video_style_clip,
get_wav2vec_audio_window,
)
from generators.utils import get_netG, render_video | 7,935 | gen_exp = gen_exp_stack[0].cpu().numpy()
pose_ext = pose_path[-3:]
pose = None
pose = get_pose_params(pose_path)
# (L, 9)
selected_pose = None
if len(pose) >= len(gen_exp):
selected_pose = pose[: len(gen_exp)]
else:
selected_pose = pose[-1].unsqueeze(0).repeat(len(gen_exp), 1)
selected_pose[: len(pose)] = pose
gen_exp_pose = np.concatenate((gen_exp, selected_pose), axis=1)
np.save(output_path, gen_exp_pose)
return output_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="inference for demo")
parser.add_argument("--wav_path", type=str, default="", help="path for wav")
parser.add_argument("--image_path", type=str, default="", help="path for image")
parser.add_argument("--disable_img_crop", dest="img_crop", action="store_false")
parser.set_defaults(img_crop=True)
parser.add_argument(
"--style_clip_path", type=str, default="", help="path for style_clip_mat"
)
parser.add_argument("--pose_path", type=str, default="", help="path for pose")
parser.add_argument(
"--max_gen_len",
type=int,
default=1000,
help="The maximum length (seconds) limitation for generating videos",
)
parser.add_argument(
"--cfg_scale",
type=float,
default=1.0,
help="The scale of classifier-free guidance",
)
parser.add_argument(
"--output_name",
type=str,
default="test",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
)
args = parser.parse_args()
if args.device == "cuda" and not torch.cuda.is_available():
print("CUDA is not available, set --device=cpu to use CPU.")
exit(1)
device = torch.device(args.device)
cfg = get_cfg_defaults()
cfg.CF_GUIDANCE.SCALE = args.cfg_scale
cfg.freeze()
tmp_dir = f"tmp/{args.output_name}"
os.makedirs(tmp_dir, exist_ok=True)
# get audio in 16000Hz
wav_16k_path = os.path.join(tmp_dir, f"{args.output_name}_16K.wav")
command = f"ffmpeg -y -i {args.wav_path} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {wav_16k_path}"
subprocess.run(command.split())
# get wav2vec feat from audio
wav2vec_processor = Wav2Vec2Processor.from_pretrained(
"jonatasgrosman/wav2vec2-large-xlsr-53-english"
)
wav2vec_model = (
Wav2Vec2Model.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
.eval()
.to(device)
)
speech_array, sampling_rate = torchaudio.load(wav_16k_path)
audio_data = speech_array.squeeze().numpy()
inputs = wav2vec_processor(
audio_data, sampling_rate=16_000, return_tensors="pt", padding=True
)
with torch.no_grad():
audio_embedding = wav2vec_model(
inputs.input_values.to(device), return_dict=False
)[0]
audio_feat_path = os.path.join(tmp_dir, f"{args.output_name}_wav2vec.npy")
np.save(audio_feat_path, audio_embedding[0].cpu().numpy())
# get src image
src_img_path = os.path.join(tmp_dir, "src_img.png")
if args.img_crop:
crop_src_image(args.image_path, src_img_path, 0.4)
else:
shutil.copy(args.image_path, src_img_path)
with torch.no_grad():
# get diff model and load checkpoint
diff_net = get_diff_net(cfg, device).to(device)
# generate face motion
face_motion_path = os.path.join(tmp_dir, f"{args.output_name}_facemotion.npy")
inference_one_video(
cfg,
audio_feat_path,
args.style_clip_path,
args.pose_path,
face_motion_path,
diff_net,
device,
max_audio_len=args.max_gen_len,
)
# get renderer
|
@torch.no_grad()
def get_diff_net(cfg, device):
diff_net = DiffusionNet(
cfg=cfg,
net=NoisePredictor(cfg),
var_sched=VarianceSchedule(
num_steps=cfg.DIFFUSION.SCHEDULE.NUM_STEPS,
beta_1=cfg.DIFFUSION.SCHEDULE.BETA_1,
beta_T=cfg.DIFFUSION.SCHEDULE.BETA_T,
mode=cfg.DIFFUSION.SCHEDULE.MODE,
),
)
checkpoint = torch.load(cfg.INFERENCE.CHECKPOINT, map_location=device)
model_state_dict = checkpoint["model_state_dict"]
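    # keep only the diffusion-network weights and strip the "diff_net." prefix from their keys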
diff_net_dict = {
k[9:]: v for k, v in model_state_dict.items() if k[:9] == "diff_net."
}
diff_net.load_state_dict(diff_net_dict, strict=True)
diff_net.eval()
return diff_net
@torch.no_grad()
def get_audio_feat(wav_path, output_name, wav2vec_model):
    # Unused helper stub; the wav2vec feature extraction is performed inline in the
    # __main__ block below.
    pass
@torch.no_grad()
def inference_one_video(
cfg,
audio_path,
style_clip_path,
pose_path,
output_path,
diff_net,
device,
max_audio_len=None,
sample_method="ddim",
ddim_num_step=10,
):
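    """Generate the per-frame facial-motion sequence (expression coefficients plus head
    pose) from precomputed wav2vec features and a style clip, and save it to `output_path`."""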
    audio_raw = np.load(audio_path)  # precomputed wav2vec features, shape (L, 1024)
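    # wav2vec features arrive at roughly 50 per second of audio; each generated video
    # frame consumes two consecutive feature frames (hence the * 50 and // 2 below)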
if max_audio_len is not None:
audio_raw = audio_raw[: max_audio_len * 50]
gen_num_frames = len(audio_raw) // 2
audio_win_array = get_wav2vec_audio_window(
audio_raw,
start_idx=0,
num_frames=gen_num_frames,
win_size=cfg.WIN_SIZE,
)
audio_win = torch.tensor(audio_win_array).to(device)
audio = audio_win.unsqueeze(0)
# the second parameter is "" because of bad interface design...
style_clip_raw, style_pad_mask_raw = get_video_style_clip(
style_clip_path, "", style_max_len=256, start_idx=0
)
style_clip = style_clip_raw.unsqueeze(0).to(device)
style_pad_mask = (
style_pad_mask_raw.unsqueeze(0).to(device)
if style_pad_mask_raw is not None
else None
)
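    # sample the expression-coefficient sequence conditioned on the audio windows and the
    # style clip, optionally with classifier-free guidance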
gen_exp_stack = diff_net.sample(
audio,
style_clip,
style_pad_mask,
output_dim=cfg.DATASET.FACE3D_DIM,
use_cf_guidance=cfg.CF_GUIDANCE.INFERENCE,
cfg_scale=cfg.CF_GUIDANCE.SCALE,
sample_method=sample_method,
ddim_num_step=ddim_num_step,
)
gen_exp = gen_exp_stack[0].cpu().numpy()
pose_ext = pose_path[-3:]
pose = None
pose = get_pose_params(pose_path)
# (L, 9)
selected_pose = None
if len(pose) >= len(gen_exp):
selected_pose = pose[: len(gen_exp)]
else:
selected_pose = pose[-1].unsqueeze(0).repeat(len(gen_exp), 1)
selected_pose[: len(pose)] = pose
gen_exp_pose = np.concatenate((gen_exp, selected_pose), axis=1)
np.save(output_path, gen_exp_pose)
return output_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="inference for demo")
parser.add_argument("--wav_path", type=str, default="", help="path for wav")
parser.add_argument("--image_path", type=str, default="", help="path for image")
parser.add_argument("--disable_img_crop", dest="img_crop", action="store_false")
parser.set_defaults(img_crop=True)
parser.add_argument(
"--style_clip_path", type=str, default="", help="path for style_clip_mat"
)
parser.add_argument("--pose_path", type=str, default="", help="path for pose")
parser.add_argument(
"--max_gen_len",
type=int,
default=1000,
help="The maximum length (seconds) limitation for generating videos",
)
parser.add_argument(
"--cfg_scale",
type=float,
default=1.0,
help="The scale of classifier-free guidance",
)
parser.add_argument(
"--output_name",
type=str,
default="test",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
)
args = parser.parse_args()
if args.device == "cuda" and not torch.cuda.is_available():
print("CUDA is not available, set --device=cpu to use CPU.")
exit(1)
device = torch.device(args.device)
cfg = get_cfg_defaults()
cfg.CF_GUIDANCE.SCALE = args.cfg_scale
cfg.freeze()
tmp_dir = f"tmp/{args.output_name}"
os.makedirs(tmp_dir, exist_ok=True)
# get audio in 16000Hz
wav_16k_path = os.path.join(tmp_dir, f"{args.output_name}_16K.wav")
command = f"ffmpeg -y -i {args.wav_path} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {wav_16k_path}"
subprocess.run(command.split())
# get wav2vec feat from audio
wav2vec_processor = Wav2Vec2Processor.from_pretrained(
"jonatasgrosman/wav2vec2-large-xlsr-53-english"
)
wav2vec_model = (
Wav2Vec2Model.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
.eval()
.to(device)
)
speech_array, sampling_rate = torchaudio.load(wav_16k_path)
audio_data = speech_array.squeeze().numpy()
inputs = wav2vec_processor(
audio_data, sampling_rate=16_000, return_tensors="pt", padding=True
)
with torch.no_grad():
audio_embedding = wav2vec_model(
inputs.input_values.to(device), return_dict=False
)[0]
audio_feat_path = os.path.join(tmp_dir, f"{args.output_name}_wav2vec.npy")
np.save(audio_feat_path, audio_embedding[0].cpu().numpy())
# get src image
src_img_path = os.path.join(tmp_dir, "src_img.png")
if args.img_crop:
crop_src_image(args.image_path, src_img_path, 0.4)
else:
shutil.copy(args.image_path, src_img_path)
with torch.no_grad():
# get diff model and load checkpoint
diff_net = get_diff_net(cfg, device).to(device)
# generate face motion
face_motion_path = os.path.join(tmp_dir, f"{args.output_name}_facemotion.npy")
inference_one_video(
cfg,
audio_feat_path,
args.style_clip_path,
args.pose_path,
face_motion_path,
diff_net,
device,
max_audio_len=args.max_gen_len,
)
# get renderer | renderer = get_netG("checkpoints/renderer.pt", device) | 8 | 2023-12-28 05:39:31+00:00 | 12k |
jiawei-ren/dreamgaussian4d | diffusers/src/diffusers/schedulers/scheduling_sde_ve.py | [
{
"identifier": "ConfigMixin",
"path": "diffusers/src/diffusers/configuration_utils.py",
"snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the `commit_hash` of the loaded configuration are returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if (\n isinstance(orig_cls_name, str)\n and orig_cls_name != cls.__name__\n and hasattr(diffusers_library, orig_cls_name)\n ):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):\n raise ValueError(\n \"Make sure that the `_class_name` is of type string or list of string (for custom pipelines).\"\n )\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. 
Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())"
},
{
"identifier": "register_to_config",
"path": "diffusers/src/diffusers/configuration_utils.py",
"snippet": "def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)"
},
{
"identifier": "BaseOutput",
"path": "diffusers/src/diffusers/utils/outputs.py",
"snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n Python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n first.\n\n </Tip>\n \"\"\"\n\n def __init_subclass__(cls) -> None:\n \"\"\"Register subclasses as pytree nodes.\n\n This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with\n `static_graph=True` with modules that output `ModelOutput` subclasses.\n \"\"\"\n if is_torch_available():\n import torch.utils._pytree\n\n torch.utils._pytree._register_pytree_node(\n cls,\n torch.utils._pytree._dict_flatten,\n lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),\n )\n\n def __post_init__(self) -> None:\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k: Any) -> Any:\n if isinstance(k, str):\n inner_dict = dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name: Any, value: Any) -> None:\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def __reduce__(self):\n if not is_dataclass(self):\n return super().__reduce__()\n callable, _args, *remaining = super().__reduce__()\n args = tuple(getattr(self, field.name) for field in fields(self))\n return callable, args, *remaining\n\n def to_tuple(self) -> Tuple[Any, ...]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())"
},
{
"identifier": "randn_tensor",
"path": "diffusers/src/diffusers/utils/torch_utils.py",
"snippet": "def randn_tensor(\n shape: Union[Tuple, List],\n generator: Optional[Union[List[\"torch.Generator\"], \"torch.Generator\"]] = None,\n device: Optional[\"torch.device\"] = None,\n dtype: Optional[\"torch.dtype\"] = None,\n layout: Optional[\"torch.layout\"] = None,\n):\n \"\"\"A helper function to create random tensors on the desired `device` with the desired `dtype`. When\n passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor\n is always created on the CPU.\n \"\"\"\n # device on which tensor is created defaults to device\n rand_device = device\n batch_size = shape[0]\n\n layout = layout or torch.strided\n device = device or torch.device(\"cpu\")\n\n if generator is not None:\n gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type\n if gen_device_type != device.type and gen_device_type == \"cpu\":\n rand_device = \"cpu\"\n if device != \"mps\":\n logger.info(\n f\"The passed generator was created on 'cpu' even though a tensor on {device} was expected.\"\n f\" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably\"\n f\" slighly speed up this function by passing a generator that was created on the {device} device.\"\n )\n elif gen_device_type != device.type and gen_device_type == \"cuda\":\n raise ValueError(f\"Cannot generate a {device} tensor from a generator of type {gen_device_type}.\")\n\n # make sure generator list of length 1 is treated like a non-list\n if isinstance(generator, list) and len(generator) == 1:\n generator = generator[0]\n\n if isinstance(generator, list):\n shape = (1,) + shape[1:]\n latents = [\n torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)\n for i in range(batch_size)\n ]\n latents = torch.cat(latents, dim=0).to(device)\n else:\n latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)\n\n return latents"
},
{
"identifier": "SchedulerMixin",
"path": "diffusers/src/diffusers/schedulers/scheduling_utils.py",
"snippet": "class SchedulerMixin(PushToHubMixin):\n \"\"\"\n Base class for all schedulers.\n\n [`SchedulerMixin`] contains common functions shared by all schedulers such as general loading and saving\n functionalities.\n\n [`ConfigMixin`] takes care of storing the configuration attributes (like `num_train_timesteps`) that are passed to\n the scheduler's `__init__` function, and the attributes can be accessed by `scheduler.config.num_train_timesteps`.\n\n Class attributes:\n - **_compatibles** (`List[str]`) -- A list of scheduler classes that are compatible with the parent scheduler\n class. Use [`~ConfigMixin.from_config`] to load a different compatible scheduler class (should be overridden\n by parent class).\n \"\"\"\n\n config_name = SCHEDULER_CONFIG_NAME\n _compatibles = []\n has_compatibles = True\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,\n subfolder: Optional[str] = None,\n return_unused_kwargs=False,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a scheduler from a pre-defined JSON configuration file in a local directory or Hub repository.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the scheduler\n configuration saved with [`~SchedulerMixin.save_pretrained`].\n subfolder (`str`, *optional*):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n\n <Tip>\n\n To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with\n `huggingface-cli login`. 
You can also activate the special\n [\"offline-mode\"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a\n firewalled environment.\n\n </Tip>\n\n \"\"\"\n config, kwargs, commit_hash = cls.load_config(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n subfolder=subfolder,\n return_unused_kwargs=True,\n return_commit_hash=True,\n **kwargs,\n )\n return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a scheduler configuration object to a directory so that it can be reloaded using the\n [`~SchedulerMixin.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)\n\n @property\n def compatibles(self):\n \"\"\"\n Returns all schedulers that are compatible with this scheduler\n\n Returns:\n `List[SchedulerMixin]`: List of compatible schedulers\n \"\"\"\n return self._get_compatibles()\n\n @classmethod\n def _get_compatibles(cls):\n compatible_classes_str = list(set([cls.__name__] + cls._compatibles))\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n compatible_classes = [\n getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)\n ]\n return compatible_classes"
},
{
"identifier": "SchedulerOutput",
"path": "diffusers/src/diffusers/schedulers/scheduling_utils.py",
"snippet": "class SchedulerOutput(BaseOutput):\n \"\"\"\n Base class for the output of a scheduler's `step` function.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n \"\"\"\n\n prev_sample: torch.FloatTensor"
}
] | import math
import torch
from dataclasses import dataclass
from typing import Optional, Tuple, Union
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput | 9,419 | # Copyright 2023 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
@dataclass
class SdeVeOutput(BaseOutput):
"""
Output class for the scheduler's `step` function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Mean averaged `prev_sample` over previous timesteps.
"""
prev_sample: torch.FloatTensor
prev_sample_mean: torch.FloatTensor
| # Copyright 2023 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
@dataclass
class SdeVeOutput(BaseOutput):
"""
Output class for the scheduler's `step` function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Mean averaged `prev_sample` over previous timesteps.
"""
prev_sample: torch.FloatTensor
prev_sample_mean: torch.FloatTensor
| class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin): | 4 | 2023-12-28 08:17:40+00:00 | 12k |
FoundationVision/UniRef | projects/UniRef/uniref/data/datasets/ytvis.py | [
{
"identifier": "Boxes",
"path": "detectron2/structures/boxes.py",
"snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor"
},
{
"identifier": "BoxMode",
"path": "detectron2/structures/boxes.py",
"snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr"
},
{
"identifier": "PolygonMasks",
"path": "detectron2/structures/masks.py",
"snippet": "class PolygonMasks:\n \"\"\"\n This class stores the segmentation masks for all objects in one image, in the form of polygons.\n\n Attributes:\n polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.\n \"\"\"\n\n def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):\n \"\"\"\n Arguments:\n polygons (list[list[np.ndarray]]): The first\n level of the list correspond to individual instances,\n the second level to all the polygons that compose the\n instance, and the third level to the polygon coordinates.\n The third level array should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n \"\"\"\n if not isinstance(polygons, list):\n raise ValueError(\n \"Cannot create PolygonMasks: Expect a list of list of polygons per image. \"\n \"Got '{}' instead.\".format(type(polygons))\n )\n\n def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:\n # Use float64 for higher precision, because why not?\n # Always put polygons on CPU (self.to is a no-op) since they\n # are supposed to be small tensors.\n # May need to change this assumption if GPU placement becomes useful\n if isinstance(t, torch.Tensor):\n t = t.cpu().numpy()\n return np.asarray(t).astype(\"float64\")\n\n def process_polygons(\n polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]\n ) -> List[np.ndarray]:\n if not isinstance(polygons_per_instance, list):\n raise ValueError(\n \"Cannot create polygons: Expect a list of polygons per instance. \"\n \"Got '{}' instead.\".format(type(polygons_per_instance))\n )\n # transform each polygon to a numpy array\n polygons_per_instance = [_make_array(p) for p in polygons_per_instance]\n for polygon in polygons_per_instance:\n if len(polygon) % 2 != 0 or len(polygon) < 6:\n raise ValueError(f\"Cannot create a polygon from {len(polygon)} coordinates.\")\n return polygons_per_instance\n\n self.polygons: List[List[np.ndarray]] = [\n process_polygons(polygons_per_instance) for polygons_per_instance in polygons\n ]\n\n def to(self, *args: Any, **kwargs: Any) -> \"PolygonMasks\":\n return self\n\n @property\n def device(self) -> torch.device:\n return torch.device(\"cpu\")\n\n def get_bounding_boxes(self) -> Boxes:\n \"\"\"\n Returns:\n Boxes: tight bounding boxes around polygon masks.\n \"\"\"\n boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)\n for idx, polygons_per_instance in enumerate(self.polygons):\n minxy = torch.as_tensor([float(\"inf\"), float(\"inf\")], dtype=torch.float32)\n maxxy = torch.zeros(2, dtype=torch.float32)\n for polygon in polygons_per_instance:\n coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)\n minxy = torch.min(minxy, torch.min(coords, dim=0).values)\n maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)\n boxes[idx, :2] = minxy\n boxes[idx, 2:] = maxxy\n return Boxes(boxes)\n\n def nonempty(self) -> torch.Tensor:\n \"\"\"\n Find masks that are non-empty.\n\n Returns:\n Tensor:\n a BoolTensor which represents whether each mask is empty (False) or not (True).\n \"\"\"\n keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]\n return torch.from_numpy(np.asarray(keep, dtype=np.bool))\n\n def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> \"PolygonMasks\":\n \"\"\"\n Support indexing over the instances and return a `PolygonMasks` object.\n `item` can be:\n\n 1. An integer. It will return an object with only one instance.\n 2. A slice. It will return an object with the selected instances.\n 3. A list[int]. 
It will return an object with the selected instances,\n correpsonding to the indices in the list.\n 4. A vector mask of type BoolTensor, whose length is num_instances.\n It will return an object with the instances whose mask is nonzero.\n \"\"\"\n if isinstance(item, int):\n selected_polygons = [self.polygons[item]]\n elif isinstance(item, slice):\n selected_polygons = self.polygons[item]\n elif isinstance(item, list):\n selected_polygons = [self.polygons[i] for i in item]\n elif isinstance(item, torch.Tensor):\n # Polygons is a list, so we have to move the indices back to CPU.\n if item.dtype == torch.bool:\n assert item.dim() == 1, item.shape\n item = item.nonzero().squeeze(1).cpu().numpy().tolist()\n elif item.dtype in [torch.int32, torch.int64]:\n item = item.cpu().numpy().tolist()\n else:\n raise ValueError(\"Unsupported tensor dtype={} for indexing!\".format(item.dtype))\n selected_polygons = [self.polygons[i] for i in item]\n return PolygonMasks(selected_polygons)\n\n def __iter__(self) -> Iterator[List[np.ndarray]]:\n \"\"\"\n Yields:\n list[ndarray]: the polygons for one instance.\n Each Tensor is a float64 vector representing a polygon.\n \"\"\"\n return iter(self.polygons)\n\n def __repr__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={})\".format(len(self.polygons))\n return s\n\n def __len__(self) -> int:\n return len(self.polygons)\n\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\n \"\"\"\n Crop each mask by the given box, and resize results to (mask_size, mask_size).\n This can be used to prepare training targets for Mask R-CNN.\n\n Args:\n boxes (Tensor): Nx4 tensor storing the boxes for each mask\n mask_size (int): the size of the rasterized mask.\n\n Returns:\n Tensor: A bool tensor of shape (N, mask_size, mask_size), where\n N is the number of predicted boxes for this image.\n \"\"\"\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\n\n device = boxes.device\n # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise\n # (several small tensors for representing a single instance mask)\n boxes = boxes.to(torch.device(\"cpu\"))\n\n results = [\n rasterize_polygons_within_box(poly, box.numpy(), mask_size)\n for poly, box in zip(self.polygons, boxes)\n ]\n \"\"\"\n poly: list[list[float]], the polygons for one instance\n box: a tensor of shape (4,)\n \"\"\"\n if len(results) == 0:\n return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)\n return torch.stack(results, dim=0).to(device=device)\n\n def area(self):\n \"\"\"\n Computes area of the mask.\n Only works with Polygons, using the shoelace formula:\n https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n\n Returns:\n Tensor: a vector, area for each instance\n \"\"\"\n\n area = []\n for polygons_per_instance in self.polygons:\n area_per_instance = 0\n for p in polygons_per_instance:\n area_per_instance += polygon_area(p[0::2], p[1::2])\n area.append(area_per_instance)\n\n return torch.tensor(area)\n\n @staticmethod\n def cat(polymasks_list: List[\"PolygonMasks\"]) -> \"PolygonMasks\":\n \"\"\"\n Concatenates a list of PolygonMasks into a single PolygonMasks\n\n Arguments:\n polymasks_list (list[PolygonMasks])\n\n Returns:\n PolygonMasks: the concatenated PolygonMasks\n \"\"\"\n assert isinstance(polymasks_list, (list, tuple))\n assert len(polymasks_list) > 0\n assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)\n\n 
cat_polymasks = type(polymasks_list[0])(\n list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))\n )\n return cat_polymasks"
},
{
"identifier": "DatasetCatalog",
"path": "detectron2/data/catalog.py",
"snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }"
}
] | import contextlib
import io
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
import detectron2.data.datasets # noqa # add pre-defined metadata
import sys
import copy
from fvcore.common.file_io import PathManager
from fvcore.common.timer import Timer
from detectron2.structures import Boxes, BoxMode, PolygonMasks
from detectron2.data import DatasetCatalog, MetadataCatalog
from pycocotools.ytvos import YTVOS
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
from PIL import Image | 10,338 | logger.info("Loaded {} videos in YTVIS format from {}".format(len(vids_anns), json_file))
dataset_dicts = []
ann_keys = ["iscrowd", "category_id", "id"] + (extra_annotation_keys or [])
num_instances_without_valid_segmentation = 0
for (vid_dict, anno_dict_list) in vids_anns:
record = {}
record["file_names"] = [os.path.join(image_root, vid_dict["file_names"][i]) for i in range(vid_dict["length"])]
record["height"] = vid_dict["height"]
record["width"] = vid_dict["width"]
record["length"] = vid_dict["length"]
if "expressions" in vid_dict:
# for ref-youtube-vos and ref-davis
record["expressions"] = vid_dict["expressions"]
# for evaluation
if "exp_id" in vid_dict:
record["exp_id"] = vid_dict["exp_id"]
if "video" in vid_dict:
record["video"] = vid_dict["video"]
video_id = record["video_id"] = vid_dict["id"]
# store the dataset name
if "expressions" in vid_dict:
record["task"] = "rvos"
if dataset_name.startswith("video-refcoco"):
record["dataset_name"] = "video-refcoco"
elif dataset_name.startswith("refytvos"):
record["dataset_name"] = "refytvos"
elif dataset_name.startswith("refdavis"):
record["dataset_name"] = dataset_name # refdavis-val-0,1,2,3
else:
record["task"] = "vos"
if dataset_name.startswith("video-coco"):
record["dataset_name"] = "video-coco"
elif dataset_name.startswith("ytbvos18"):
record["dataset_name"] = "ytbvos18"
elif dataset_name.startswith("ytbvos19"):
record["dataset_name"] = "ytbvos19"
elif dataset_name.startswith("davis17"):
record["dataset_name"] = "davis17"
elif dataset_name.startswith("ovis"):
record["dataset_name"] = "ovis"
elif dataset_name.startswith("vos-lvos"):
record["dataset_name"] = "vos-lvos"
elif dataset_name.startswith("mose"):
record["dataset_name"] = "mose"
video_objs = []
for frame_idx in range(record["length"]):
frame_objs = []
for anno in anno_dict_list:
assert anno["video_id"] == video_id
obj = {key: anno[key] for key in ann_keys if key in anno}
_bboxes = anno.get("bboxes", None)
_segm = anno.get("segmentations", None)
if not (_bboxes and _segm and _bboxes[frame_idx] and _segm[frame_idx]):
continue
if "ori_id" in anno:
# for VOS inference
obj["ori_id"] = anno["ori_id"]
bbox = _bboxes[frame_idx]
segm = _segm[frame_idx]
obj["bbox"] = bbox
obj["bbox_mode"] = BoxMode.XYWH_ABS
if isinstance(segm, dict):
if isinstance(segm["counts"], list):
# convert to compressed RLE
segm = mask_util.frPyObjects(segm, *segm["size"])
elif segm:
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
num_instances_without_valid_segmentation += 1
continue # ignore this instance
obj["segmentation"] = segm
if id_map:
obj["category_id"] = id_map[obj["category_id"]]
frame_objs.append(obj)
video_objs.append(frame_objs)
record["annotations"] = video_objs
dataset_dicts.append(record)
if num_instances_without_valid_segmentation > 0:
logger.warning(
"Filtered out {} instances without valid segmentation. ".format(
num_instances_without_valid_segmentation
)
+ "There might be issues in your dataset generation process. "
"A valid polygon should be a list[float] with even length >= 6."
)
return dataset_dicts
def register_ytvis_instances(name, metadata, json_file, image_root):
"""
Register a dataset in YTVIS's json annotation format for
instance tracking.
Args:
name (str): the name that identifies a dataset, e.g. "ytvis_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains functions to parse YTVIS dataset of
COCO-format annotations into dicts in "Detectron2 format".
"""
logger = logging.getLogger(__name__)
__all__ = ["load_ytvis_json", "register_ytvis_instances"]
YTVIS_CATEGORIES_2019 = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [0, 82, 0], "isthing": 1, "id": 2, "name": "giant_panda"},
{"color": [119, 11, 32], "isthing": 1, "id": 3, "name": "lizard"},
{"color": [165, 42, 42], "isthing": 1, "id": 4, "name": "parrot"},
{"color": [134, 134, 103], "isthing": 1, "id": 5, "name": "skateboard"},
{"color": [0, 0, 142], "isthing": 1, "id": 6, "name": "sedan"},
{"color": [255, 109, 65], "isthing": 1, "id": 7, "name": "ape"},
{"color": [0, 226, 252], "isthing": 1, "id": 8, "name": "dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 9, "name": "snake"},
{"color": [0, 60, 100], "isthing": 1, "id": 10, "name": "monkey"},
{"color": [250, 170, 30], "isthing": 1, "id": 11, "name": "hand"},
{"color": [100, 170, 30], "isthing": 1, "id": 12, "name": "rabbit"},
{"color": [179, 0, 194], "isthing": 1, "id": 13, "name": "duck"},
{"color": [255, 77, 255], "isthing": 1, "id": 14, "name": "cat"},
{"color": [120, 166, 157], "isthing": 1, "id": 15, "name": "cow"},
{"color": [73, 77, 174], "isthing": 1, "id": 16, "name": "fish"},
{"color": [0, 80, 100], "isthing": 1, "id": 17, "name": "train"},
{"color": [182, 182, 255], "isthing": 1, "id": 18, "name": "horse"},
{"color": [0, 143, 149], "isthing": 1, "id": 19, "name": "turtle"},
{"color": [174, 57, 255], "isthing": 1, "id": 20, "name": "bear"},
{"color": [0, 0, 230], "isthing": 1, "id": 21, "name": "motorbike"},
{"color": [72, 0, 118], "isthing": 1, "id": 22, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 23, "name": "leopard"},
{"color": [0, 125, 92], "isthing": 1, "id": 24, "name": "fox"},
{"color": [209, 0, 151], "isthing": 1, "id": 25, "name": "deer"},
{"color": [188, 208, 182], "isthing": 1, "id": 26, "name": "owl"},
{"color": [145, 148, 174], "isthing": 1, "id": 27, "name": "surfboard"},
{"color": [106, 0, 228], "isthing": 1, "id": 28, "name": "airplane"},
{"color": [0, 0, 70], "isthing": 1, "id": 29, "name": "truck"},
{"color": [199, 100, 0], "isthing": 1, "id": 30, "name": "zebra"},
{"color": [166, 196, 102], "isthing": 1, "id": 31, "name": "tiger"},
{"color": [110, 76, 0], "isthing": 1, "id": 32, "name": "elephant"},
{"color": [133, 129, 255], "isthing": 1, "id": 33, "name": "snowboard"},
{"color": [0, 0, 192], "isthing": 1, "id": 34, "name": "boat"},
{"color": [183, 130, 88], "isthing": 1, "id": 35, "name": "shark"},
{"color": [130, 114, 135], "isthing": 1, "id": 36, "name": "mouse"},
{"color": [107, 142, 35], "isthing": 1, "id": 37, "name": "frog"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "eagle"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "earless_seal"},
{"color": [255, 208, 186], "isthing": 1, "id": 40, "name": "tennis_racket"},
]
YTVIS_CATEGORIES_2021 = [
{"color": [106, 0, 228], "isthing": 1, "id": 1, "name": "airplane"},
{"color": [174, 57, 255], "isthing": 1, "id": 2, "name": "bear"},
{"color": [255, 109, 65], "isthing": 1, "id": 3, "name": "bird"},
{"color": [0, 0, 192], "isthing": 1, "id": 4, "name": "boat"},
{"color": [0, 0, 142], "isthing": 1, "id": 5, "name": "car"},
{"color": [255, 77, 255], "isthing": 1, "id": 6, "name": "cat"},
{"color": [120, 166, 157], "isthing": 1, "id": 7, "name": "cow"},
{"color": [209, 0, 151], "isthing": 1, "id": 8, "name": "deer"},
{"color": [0, 226, 252], "isthing": 1, "id": 9, "name": "dog"},
{"color": [179, 0, 194], "isthing": 1, "id": 10, "name": "duck"},
{"color": [174, 255, 243], "isthing": 1, "id": 11, "name": "earless_seal"},
{"color": [110, 76, 0], "isthing": 1, "id": 12, "name": "elephant"},
{"color": [73, 77, 174], "isthing": 1, "id": 13, "name": "fish"},
{"color": [250, 170, 30], "isthing": 1, "id": 14, "name": "flying_disc"},
{"color": [0, 125, 92], "isthing": 1, "id": 15, "name": "fox"},
{"color": [107, 142, 35], "isthing": 1, "id": 16, "name": "frog"},
{"color": [0, 82, 0], "isthing": 1, "id": 17, "name": "giant_panda"},
{"color": [72, 0, 118], "isthing": 1, "id": 18, "name": "giraffe"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [255, 179, 240], "isthing": 1, "id": 20, "name": "leopard"},
{"color": [119, 11, 32], "isthing": 1, "id": 21, "name": "lizard"},
{"color": [0, 60, 100], "isthing": 1, "id": 22, "name": "monkey"},
{"color": [0, 0, 230], "isthing": 1, "id": 23, "name": "motorbike"},
{"color": [130, 114, 135], "isthing": 1, "id": 24, "name": "mouse"},
{"color": [165, 42, 42], "isthing": 1, "id": 25, "name": "parrot"},
{"color": [220, 20, 60], "isthing": 1, "id": 26, "name": "person"},
{"color": [100, 170, 30], "isthing": 1, "id": 27, "name": "rabbit"},
{"color": [183, 130, 88], "isthing": 1, "id": 28, "name": "shark"},
{"color": [134, 134, 103], "isthing": 1, "id": 29, "name": "skateboard"},
{"color": [5, 121, 0], "isthing": 1, "id": 30, "name": "snake"},
{"color": [133, 129, 255], "isthing": 1, "id": 31, "name": "snowboard"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "squirrel"},
{"color": [145, 148, 174], "isthing": 1, "id": 33, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 34, "name": "tennis_racket"},
{"color": [166, 196, 102], "isthing": 1, "id": 35, "name": "tiger"},
{"color": [0, 80, 100], "isthing": 1, "id": 36, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 37, "name": "truck"},
{"color": [0, 143, 149], "isthing": 1, "id": 38, "name": "turtle"},
{"color": [0, 228, 0], "isthing": 1, "id": 39, "name": "whale"},
{"color": [199, 100, 0], "isthing": 1, "id": 40, "name": "zebra"},
]
def _get_ytvis_2019_instances_meta():
thing_ids = [k["id"] for k in YTVIS_CATEGORIES_2019 if k["isthing"] == 1]
thing_colors = [k["color"] for k in YTVIS_CATEGORIES_2019 if k["isthing"] == 1]
assert len(thing_ids) == 40, len(thing_ids)
# Mapping from the incontiguous YTVIS category id to an id in [0, 39]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in YTVIS_CATEGORIES_2019 if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
def _get_ytvis_2021_instances_meta():
thing_ids = [k["id"] for k in YTVIS_CATEGORIES_2021 if k["isthing"] == 1]
thing_colors = [k["color"] for k in YTVIS_CATEGORIES_2021 if k["isthing"] == 1]
assert len(thing_ids) == 40, len(thing_ids)
# Mapping from the incontiguous YTVIS category id to an id in [0, 39]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in YTVIS_CATEGORIES_2021 if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
def load_ytvis_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
ytvis_api = YTVOS(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
id_map = None
if dataset_name is not None:
meta = MetadataCatalog.get(dataset_name)
cat_ids = sorted(ytvis_api.getCatIds())
cats = ytvis_api.loadCats(cat_ids)
# The categories in a custom json file may not be sorted.
thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
meta.thing_classes = thing_classes
# In COCO, certain category ids are artificially removed,
# and by convention they are always ignored.
# We deal with COCO's id issue and translate
        # the category ids to contiguous ids in [0, #categories).
# It works by looking at the "categories" field in the json, therefore
        # if users' own json also has incontiguous ids, we'll
# apply this mapping as well but print a warning.
if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
if "coco" not in dataset_name:
logger.warning(
"""
Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
"""
)
id_map = {v: i for i, v in enumerate(cat_ids)}
meta.thing_dataset_id_to_contiguous_id = id_map
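            # Illustrative note (not from the original source): with cat_ids such as
            # [1, 3, 7], id_map becomes {1: 0, 3: 1, 7: 2}, i.e. annotation category
            # ids are remapped to contiguous ids starting from 0.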
# sort indices for reproducible results
vid_ids = sorted(ytvis_api.vids.keys())
# vids is a list of dicts, each looks something like:
# {'license': 1,
# 'flickr_url': ' ',
# 'file_names': ['ff25f55852/00000.jpg', 'ff25f55852/00005.jpg', ..., 'ff25f55852/00175.jpg'],
# 'height': 720,
# 'width': 1280,
# 'length': 36,
# 'date_captured': '2019-04-11 00:55:41.903902',
# 'id': 2232}
vids = ytvis_api.loadVids(vid_ids)
anns = [ytvis_api.vidToAnns[vid_id] for vid_id in vid_ids]
total_num_valid_anns = sum([len(x) for x in anns])
total_num_anns = len(ytvis_api.anns)
if total_num_valid_anns < total_num_anns:
logger.warning(
f"{json_file} contains {total_num_anns} annotations, but only "
f"{total_num_valid_anns} of them match to images in the file."
)
vids_anns = list(zip(vids, anns))
logger.info("Loaded {} videos in YTVIS format from {}".format(len(vids_anns), json_file))
dataset_dicts = []
ann_keys = ["iscrowd", "category_id", "id"] + (extra_annotation_keys or [])
num_instances_without_valid_segmentation = 0
for (vid_dict, anno_dict_list) in vids_anns:
record = {}
record["file_names"] = [os.path.join(image_root, vid_dict["file_names"][i]) for i in range(vid_dict["length"])]
record["height"] = vid_dict["height"]
record["width"] = vid_dict["width"]
record["length"] = vid_dict["length"]
if "expressions" in vid_dict:
# for ref-youtube-vos and ref-davis
record["expressions"] = vid_dict["expressions"]
# for evaluation
if "exp_id" in vid_dict:
record["exp_id"] = vid_dict["exp_id"]
if "video" in vid_dict:
record["video"] = vid_dict["video"]
video_id = record["video_id"] = vid_dict["id"]
# store the dataset name
if "expressions" in vid_dict:
record["task"] = "rvos"
if dataset_name.startswith("video-refcoco"):
record["dataset_name"] = "video-refcoco"
elif dataset_name.startswith("refytvos"):
record["dataset_name"] = "refytvos"
elif dataset_name.startswith("refdavis"):
record["dataset_name"] = dataset_name # refdavis-val-0,1,2,3
else:
record["task"] = "vos"
if dataset_name.startswith("video-coco"):
record["dataset_name"] = "video-coco"
elif dataset_name.startswith("ytbvos18"):
record["dataset_name"] = "ytbvos18"
elif dataset_name.startswith("ytbvos19"):
record["dataset_name"] = "ytbvos19"
elif dataset_name.startswith("davis17"):
record["dataset_name"] = "davis17"
elif dataset_name.startswith("ovis"):
record["dataset_name"] = "ovis"
elif dataset_name.startswith("vos-lvos"):
record["dataset_name"] = "vos-lvos"
elif dataset_name.startswith("mose"):
record["dataset_name"] = "mose"
video_objs = []
for frame_idx in range(record["length"]):
frame_objs = []
for anno in anno_dict_list:
assert anno["video_id"] == video_id
obj = {key: anno[key] for key in ann_keys if key in anno}
_bboxes = anno.get("bboxes", None)
_segm = anno.get("segmentations", None)
if not (_bboxes and _segm and _bboxes[frame_idx] and _segm[frame_idx]):
continue
if "ori_id" in anno:
# for VOS inference
obj["ori_id"] = anno["ori_id"]
bbox = _bboxes[frame_idx]
segm = _segm[frame_idx]
obj["bbox"] = bbox
obj["bbox_mode"] = BoxMode.XYWH_ABS
if isinstance(segm, dict):
if isinstance(segm["counts"], list):
# convert to compressed RLE
segm = mask_util.frPyObjects(segm, *segm["size"])
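                        # Descriptive note: a dict whose "counts" is a list is an
                        # uncompressed RLE; frPyObjects re-encodes it into the compressed
                        # RLE format that pycocotools works with downstream.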
elif segm:
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
num_instances_without_valid_segmentation += 1
continue # ignore this instance
obj["segmentation"] = segm
if id_map:
obj["category_id"] = id_map[obj["category_id"]]
frame_objs.append(obj)
video_objs.append(frame_objs)
record["annotations"] = video_objs
dataset_dicts.append(record)
if num_instances_without_valid_segmentation > 0:
logger.warning(
"Filtered out {} instances without valid segmentation. ".format(
num_instances_without_valid_segmentation
)
+ "There might be issues in your dataset generation process. "
"A valid polygon should be a list[float] with even length >= 6."
)
return dataset_dicts
def register_ytvis_instances(name, metadata, json_file, image_root):
"""
Register a dataset in YTVIS's json annotation format for
instance tracking.
Args:
name (str): the name that identifies a dataset, e.g. "ytvis_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts | DatasetCatalog.register(name, lambda: load_ytvis_json(json_file, image_root, name)) | 3 | 2023-12-22 13:31:33+00:00 | 12k |
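The docstring of register_ytvis_instances above describes the registration API. A minimal usage sketch, where the dataset name and both paths are placeholders rather than values taken from this record:

register_ytvis_instances(
    "ytvis_2019_train",                      # hypothetical dataset name
    _get_ytvis_2019_instances_meta(),        # metadata helper defined earlier in this file
    "datasets/ytvis_2019/train.json",        # placeholder path to the YTVIS-style json
    "datasets/ytvis_2019/train/JPEGImages",  # placeholder image root
)
dataset_dicts = DatasetCatalog.get("ytvis_2019_train")  # invokes load_ytvis_json lazily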
xhuangcv/humannorm | threestudio/models/renderers/gan_volume_renderer.py | [
{
"identifier": "BaseBackground",
"path": "threestudio/models/background/base.py",
"snippet": "class BaseBackground(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n def configure(self):\n pass\n\n def forward(self, dirs: Float[Tensor, \"B H W 3\"]) -> Float[Tensor, \"B H W Nc\"]:\n raise NotImplementedError"
},
{
"identifier": "BaseImplicitGeometry",
"path": "threestudio/models/geometry/base.py",
"snippet": "class BaseImplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n isosurface: bool = True\n isosurface_method: str = \"mt\"\n isosurface_resolution: int = 128\n isosurface_threshold: Union[float, str] = 0.0\n isosurface_chunk: int = 0\n isosurface_coarse_to_fine: bool = True\n isosurface_deformable_grid: bool = False\n isosurface_remove_outliers: bool = True\n isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01\n use_sdf_loss: bool = False\n start_sdf_loss_step: int = 3000\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )\n self.isosurface_helper: Optional[IsosurfaceHelper] = None\n self.unbounded: bool = False\n\n def _initilize_isosurface_helper(self):\n if self.cfg.isosurface and self.isosurface_helper is None:\n if self.cfg.isosurface_method == \"mc-cpu\":\n self.isosurface_helper = MarchingCubeCPUHelper(\n self.cfg.isosurface_resolution\n ).to(self.device)\n elif self.cfg.isosurface_method == \"mt\":\n self.isosurface_helper = MarchingTetrahedraHelper(\n self.cfg.isosurface_resolution,\n f\"load/tets/{self.cfg.isosurface_resolution}_tets.npz\",\n ).to(self.device)\n else:\n raise AttributeError(\n \"Unknown isosurface method {self.cfg.isosurface_method}\"\n )\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n raise NotImplementedError\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]], Optional[Float[Tensor, \"*N 1\"]]]:\n # return the value of the implicit field, could be density / signed distance\n # also return a deformation field if the grid vertices can be optimized\n raise NotImplementedError\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n # return the value of the implicit field, where the zero level set represents the surface\n raise NotImplementedError\n\n def _isosurface(self, bbox: Float[Tensor, \"2 3\"], fine_stage: bool = False) -> Mesh:\n def batch_func(x):\n # scale to bbox as the input vertices are in [0, 1]\n field, deformation, sdf_loss = self.forward_field(\n scale_tensor(\n x.to(bbox.device), self.isosurface_helper.points_range, bbox\n ),\n )\n field = field.to(\n x.device\n ) # move to the same device as the input (could be CPU)\n if deformation is not None:\n deformation = deformation.to(x.device)\n return field, deformation, sdf_loss\n\n assert self.isosurface_helper is not None\n\n field, deformation, sdf_loss = chunk_batch(\n batch_func,\n self.cfg.isosurface_chunk,\n self.isosurface_helper.grid_vertices,\n )\n\n threshold: float\n\n if isinstance(self.cfg.isosurface_threshold, float):\n threshold = self.cfg.isosurface_threshold\n elif self.cfg.isosurface_threshold == \"auto\":\n eps = 1.0e-5\n threshold = field[field > eps].mean().item()\n threestudio.info(\n f\"Automatically determined isosurface threshold: {threshold}\"\n )\n else:\n raise TypeError(\n f\"Unknown isosurface_threshold {self.cfg.isosurface_threshold}\"\n )\n\n level = self.forward_level(field, threshold)\n mesh: Mesh = self.isosurface_helper(level, deformation=deformation)\n mesh.v_pos = scale_tensor(\n mesh.v_pos, self.isosurface_helper.points_range, 
bbox\n ) # scale to bbox as the grid vertices are in [0, 1]\n mesh.add_extra(\"bbox\", bbox)\n\n if self.cfg.isosurface_remove_outliers:\n # remove outliers components with small number of faces\n # only enabled when the mesh is not differentiable\n mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)\n\n return mesh, sdf_loss\n\n def isosurface(self) -> Mesh:\n if not self.cfg.isosurface:\n raise NotImplementedError(\n \"Isosurface is not enabled in the current configuration\"\n )\n self._initilize_isosurface_helper()\n if self.cfg.isosurface_coarse_to_fine:\n threestudio.debug(\"First run isosurface to get a tight bounding box ...\")\n with torch.no_grad():\n mesh_coarse = self._isosurface(self.bbox)\n vmin, vmax = mesh_coarse.v_pos.amin(dim=0), mesh_coarse.v_pos.amax(dim=0)\n vmin_ = (vmin - (vmax - vmin) * 0.1).max(self.bbox[0])\n vmax_ = (vmax + (vmax - vmin) * 0.1).min(self.bbox[1])\n threestudio.debug(\"Run isosurface again with the tight bounding box ...\")\n mesh, sdf_loss = self._isosurface(torch.stack([vmin_, vmax_], dim=0), fine_stage=True)\n else:\n mesh, sdf_loss = self._isosurface(self.bbox)\n if self.cfg.use_sdf_loss:\n return mesh, sdf_loss\n else:\n return mesh"
},
{
"identifier": "BaseMaterial",
"path": "threestudio/models/materials/base.py",
"snippet": "class BaseMaterial(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n requires_normal: bool = False\n requires_tangent: bool = False\n\n def configure(self):\n pass\n\n def forward(self, *args, **kwargs) -> Float[Tensor, \"*B 3\"]:\n raise NotImplementedError\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}"
},
{
"identifier": "VolumeRenderer",
"path": "threestudio/models/renderers/base.py",
"snippet": "class VolumeRenderer(Renderer):\n pass"
},
{
"identifier": "NLayerDiscriminator",
"path": "threestudio/utils/GAN/discriminator.py",
"snippet": "class NLayerDiscriminator(nn.Module):\n \"\"\"Defines a PatchGAN discriminator as in Pix2Pix\n --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py\n \"\"\"\n\n def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):\n \"\"\"Construct a PatchGAN discriminator\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n n_layers (int) -- the number of conv layers in the discriminator\n norm_layer -- normalization layer\n \"\"\"\n super(NLayerDiscriminator, self).__init__()\n if not use_actnorm:\n norm_layer = nn.BatchNorm2d\n else:\n norm_layer = ActNorm\n if (\n type(norm_layer) == functools.partial\n ): # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n kw = 4\n padw = 1\n sequence = [\n nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True),\n ]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2**n, 8)\n sequence += [\n nn.Conv2d(\n ndf * nf_mult_prev,\n ndf * nf_mult,\n kernel_size=kw,\n stride=2,\n padding=padw,\n bias=use_bias,\n ),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2**n_layers, 8)\n sequence += [\n nn.Conv2d(\n ndf * nf_mult_prev,\n ndf * nf_mult,\n kernel_size=kw,\n stride=1,\n padding=padw,\n bias=use_bias,\n ),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n ]\n\n sequence += [\n nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)\n ] # output 1 channel prediction map\n self.main = nn.Sequential(*sequence)\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n return self.main(input)"
},
{
"identifier": "weights_init",
"path": "threestudio/utils/GAN/discriminator.py",
"snippet": "def weights_init(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm\") != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "threestudio/utils/GAN/distribution.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean"
},
{
"identifier": "MobileNetV3",
"path": "threestudio/utils/GAN/mobilenet.py",
"snippet": "class MobileNetV3(nn.Module):\n def __init__(\n self, n_class=1000, input_size=224, dropout=0.0, mode=\"small\", width_mult=1.0\n ):\n super(MobileNetV3, self).__init__()\n input_channel = 16\n last_channel = 1280\n if mode == \"large\":\n # refer to Table 1 in paper\n mobile_setting = [\n # k, exp, c, se, nl, s,\n [3, 16, 16, False, \"RE\", 1],\n [3, 64, 24, False, \"RE\", 2],\n [3, 72, 24, False, \"RE\", 1],\n [5, 72, 40, True, \"RE\", 2],\n [5, 120, 40, True, \"RE\", 1],\n [5, 120, 40, True, \"RE\", 1],\n [3, 240, 80, False, \"HS\", 2],\n [3, 200, 80, False, \"HS\", 1],\n [3, 184, 80, False, \"HS\", 1],\n [3, 184, 80, False, \"HS\", 1],\n [3, 480, 112, True, \"HS\", 1],\n [3, 672, 112, True, \"HS\", 1],\n [5, 672, 160, True, \"HS\", 2],\n [5, 960, 160, True, \"HS\", 1],\n [5, 960, 160, True, \"HS\", 1],\n ]\n elif mode == \"small\":\n # refer to Table 2 in paper\n mobile_setting = [\n # k, exp, c, se, nl, s,\n [3, 16, 16, True, \"RE\", 2],\n [3, 72, 24, False, \"RE\", 2],\n [3, 88, 24, False, \"RE\", 1],\n [5, 96, 40, True, \"HS\", 2],\n [5, 240, 40, True, \"HS\", 1],\n [5, 240, 40, True, \"HS\", 1],\n [5, 120, 48, True, \"HS\", 1],\n [5, 144, 48, True, \"HS\", 1],\n [5, 288, 96, True, \"HS\", 2],\n [5, 576, 96, True, \"HS\", 1],\n [5, 576, 96, True, \"HS\", 1],\n ]\n else:\n raise NotImplementedError\n\n # building first layer\n assert input_size % 32 == 0\n last_channel = (\n make_divisible(last_channel * width_mult)\n if width_mult > 1.0\n else last_channel\n )\n self.features = [conv_bn(3, input_channel, 2, nlin_layer=Hswish)]\n self.classifier = []\n\n # building mobile blocks\n for k, exp, c, se, nl, s in mobile_setting:\n output_channel = make_divisible(c * width_mult)\n exp_channel = make_divisible(exp * width_mult)\n self.features.append(\n MobileBottleneck(\n input_channel, output_channel, k, s, exp_channel, se, nl\n )\n )\n input_channel = output_channel\n\n # building last several layers\n if mode == \"large\":\n last_conv = make_divisible(960 * width_mult)\n self.features.append(\n conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish)\n )\n self.features.append(nn.AdaptiveAvgPool2d(1))\n self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))\n self.features.append(Hswish(inplace=True))\n elif mode == \"small\":\n last_conv = make_divisible(576 * width_mult)\n self.features.append(\n conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish)\n )\n # self.features.append(SEModule(last_conv)) # refer to paper Table2, but I think this is a mistake\n self.features.append(nn.AdaptiveAvgPool2d(1))\n self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))\n self.features.append(Hswish(inplace=True))\n else:\n raise NotImplementedError\n\n # make it nn.Sequential\n self.features = nn.Sequential(*self.features)\n\n # building classifier\n self.classifier = nn.Sequential(\n nn.Dropout(p=dropout), # refer to paper section 6\n nn.Linear(last_channel, n_class),\n )\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.mean(3).mean(2)\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self):\n # weight initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\")\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n if m.bias is not None:\n nn.init.zeros_(m.bias)"
},
{
"identifier": "Decoder",
"path": "threestudio/utils/GAN/vae.py",
"snippet": "class Decoder(nn.Module):\n def __init__(\n self,\n *,\n ch,\n out_ch,\n ch_mult=(1, 2, 4, 8),\n num_res_blocks,\n attn_resolutions,\n dropout=0.0,\n resamp_with_conv=True,\n in_channels,\n resolution,\n z_channels,\n give_pre_end=False,\n tanh_out=False,\n use_linear_attn=False,\n attn_type=\"vanilla\",\n **ignorekwargs,\n ):\n super().__init__()\n if use_linear_attn:\n attn_type = \"linear\"\n self.ch = ch\n # self.temb_ch = 3\n self.temb_ch = 64\n # self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n self.give_pre_end = give_pre_end\n self.tanh_out = tanh_out\n self.attn_resolutions = attn_resolutions\n\n # compute in_ch_mult, block_in and curr_res at lowest res\n in_ch_mult = (1,) + tuple(ch_mult)\n block_in = ch * ch_mult[self.num_resolutions - 1]\n curr_res = resolution // 2 ** (self.num_resolutions - 1)\n self.z_shape = (1, z_channels, curr_res, curr_res)\n print(\n \"Working with z of shape {} = {} dimensions.\".format(\n self.z_shape, np.prod(self.z_shape)\n )\n )\n\n # z to block_in\n self.conv_in = torch.nn.Conv2d(\n z_channels, block_in, kernel_size=3, stride=1, padding=1\n )\n\n self.conv_in3 = torch.nn.Conv2d(\n z_channels + 3, block_in, kernel_size=3, stride=1, padding=1\n )\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(\n in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout,\n )\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(\n in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout,\n )\n\n # upsampling\n self.up = nn.ModuleList()\n for i_level in reversed(range(self.num_resolutions)):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n block_out = ch * ch_mult[i_level]\n for i_block in range(self.num_res_blocks + 1):\n block.append(\n ResnetBlock(\n in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout,\n )\n )\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n up = nn.Module()\n up.block = block\n up.attn = attn\n if i_level != 0:\n up.upsample = Upsample(block_in, resamp_with_conv)\n up.rgb_conv = torch.nn.Conv2d(\n block_in + 3, 3, kernel_size=3, stride=1, padding=1\n )\n up.rgb_cat_conv = torch.nn.Conv2d(\n block_in + 3, block_in, kernel_size=3, stride=1, padding=1\n )\n curr_res = curr_res * 2\n self.up.insert(0, up) # prepend to get consistent order\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(\n block_in, out_ch, kernel_size=3, stride=1, padding=1\n )\n\n def forward(self, z, temb=None):\n # assert z.shape[1:] == self.z_shape[1:]\n self.last_z_shape = z.shape\n\n # timestep embedding\n # temb = None\n\n # z to block_in\n rgb = z[:, :3]\n if z.shape[1] == self.z_shape[1] + 3:\n h = self.conv_in3(z)\n else:\n h = self.conv_in(z)\n\n # middle\n # h = self.mid.block_1(h, temb)\n # h = self.mid.block_2(h, temb)\n\n # upsampling\n for i_level in reversed(range(self.num_resolutions)):\n for i_block in range(self.num_res_blocks + 1):\n h = self.up[i_level].block[i_block](h, temb)\n if len(self.up[i_level].attn) > 0:\n h = self.up[i_level].attn[i_block](h)\n if i_level != 0:\n h = self.up[i_level].upsample(h)\n\n # end\n if self.give_pre_end:\n return h\n\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n\n rgb = 
torch.nn.functional.interpolate(rgb, scale_factor=4.0, mode=\"bilinear\")\n rgb = torch.sigmoid(torch.logit(rgb, eps=1e-3) + h)\n return rgb"
},
{
"identifier": "Encoder",
"path": "threestudio/utils/GAN/vae.py",
"snippet": "class Encoder(nn.Module):\n def __init__(\n self,\n *,\n ch,\n out_ch,\n ch_mult=(1, 2, 4, 8),\n num_res_blocks,\n attn_resolutions,\n dropout=0.0,\n resamp_with_conv=True,\n in_channels,\n resolution,\n z_channels,\n double_z=True,\n use_linear_attn=False,\n attn_type=\"vanilla\",\n **ignore_kwargs,\n ):\n super().__init__()\n if use_linear_attn:\n attn_type = \"linear\"\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n self.attn_resolutions = attn_resolutions\n\n # downsampling\n self.conv_in = torch.nn.Conv2d(\n in_channels, self.ch, kernel_size=3, stride=1, padding=1\n )\n\n curr_res = resolution\n in_ch_mult = (1,) + tuple(ch_mult)\n self.in_ch_mult = in_ch_mult\n self.down = nn.ModuleList()\n for i_level in range(self.num_resolutions):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n block_in = ch * in_ch_mult[i_level]\n block_out = ch * ch_mult[i_level]\n for i_block in range(self.num_res_blocks):\n block.append(\n ResnetBlock(\n in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout,\n )\n )\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n down = nn.Module()\n down.block = block\n down.attn = attn\n if i_level != self.num_resolutions - 1:\n down.downsample = Downsample(block_in, resamp_with_conv)\n curr_res = curr_res // 2\n self.down.append(down)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(\n in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout,\n )\n if len(attn_resolutions) > 0:\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(\n in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout,\n )\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(\n block_in,\n 2 * z_channels if double_z else z_channels,\n kernel_size=3,\n stride=1,\n padding=1,\n )\n\n def forward(self, x):\n # timestep embedding\n temb = None\n\n # downsampling\n hs = [self.conv_in(x)]\n for i_level in range(self.num_resolutions):\n for i_block in range(self.num_res_blocks):\n h = self.down[i_level].block[i_block](hs[-1], temb)\n if len(self.down[i_level].attn) > 0:\n h = self.down[i_level].attn[i_block](h)\n hs.append(h)\n if i_level != self.num_resolutions - 1:\n hs.append(self.down[i_level].downsample(hs[-1]))\n\n # middle\n h = hs[-1]\n h = self.mid.block_1(h, temb)\n if len(self.attn_resolutions) > 0:\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n\n # end\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n return h"
}
] | from dataclasses import dataclass
from threestudio.models.background.base import BaseBackground
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.renderers.base import VolumeRenderer
from threestudio.utils.GAN.discriminator import NLayerDiscriminator, weights_init
from threestudio.utils.GAN.distribution import DiagonalGaussianDistribution
from threestudio.utils.GAN.mobilenet import MobileNetV3 as GlobalEncoder
from threestudio.utils.GAN.vae import Decoder as Generator
from threestudio.utils.GAN.vae import Encoder as LocalEncoder
from threestudio.utils.typing import *
import torch
import torch.nn.functional as F
import threestudio | 7,243 |
@threestudio.register("gan-volume-renderer")
class GANVolumeRenderer(VolumeRenderer):
@dataclass
class Config(VolumeRenderer.Config):
base_renderer_type: str = ""
base_renderer: Optional[VolumeRenderer.Config] = None
cfg: Config
def configure(
self,
geometry: BaseImplicitGeometry,
material: BaseMaterial,
background: BaseBackground,
) -> None:
self.base_renderer = threestudio.find(self.cfg.base_renderer_type)(
self.cfg.base_renderer,
geometry=geometry,
material=material,
background=background,
)
self.ch_mult = [1, 2, 4]
self.generator = Generator(
ch=64,
out_ch=3,
ch_mult=self.ch_mult,
num_res_blocks=1,
attn_resolutions=[],
dropout=0.0,
resamp_with_conv=True,
in_channels=7,
resolution=512,
z_channels=4,
)
self.local_encoder = LocalEncoder(
ch=32,
out_ch=3,
ch_mult=self.ch_mult,
num_res_blocks=1,
attn_resolutions=[],
dropout=0.0,
resamp_with_conv=True,
in_channels=3,
resolution=512,
z_channels=4,
)
self.global_encoder = GlobalEncoder(n_class=64)
|
@threestudio.register("gan-volume-renderer")
class GANVolumeRenderer(VolumeRenderer):
@dataclass
class Config(VolumeRenderer.Config):
base_renderer_type: str = ""
base_renderer: Optional[VolumeRenderer.Config] = None
cfg: Config
def configure(
self,
geometry: BaseImplicitGeometry,
material: BaseMaterial,
background: BaseBackground,
) -> None:
self.base_renderer = threestudio.find(self.cfg.base_renderer_type)(
self.cfg.base_renderer,
geometry=geometry,
material=material,
background=background,
)
self.ch_mult = [1, 2, 4]
self.generator = Generator(
ch=64,
out_ch=3,
ch_mult=self.ch_mult,
num_res_blocks=1,
attn_resolutions=[],
dropout=0.0,
resamp_with_conv=True,
in_channels=7,
resolution=512,
z_channels=4,
)
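        # Descriptive note: in_channels=7 appears to correspond to a 4-channel latent
        # concatenated with a 3-channel RGB image (the Decoder snippet's conv_in3 layer
        # takes z_channels + 3 = 7 input channels).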
self.local_encoder = LocalEncoder(
ch=32,
out_ch=3,
ch_mult=self.ch_mult,
num_res_blocks=1,
attn_resolutions=[],
dropout=0.0,
resamp_with_conv=True,
in_channels=3,
resolution=512,
z_channels=4,
)
self.global_encoder = GlobalEncoder(n_class=64) | self.discriminator = NLayerDiscriminator( | 4 | 2023-12-23 12:37:48+00:00 | 12k |
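For reference, a renderer registered as "gan-volume-renderer" is constructed through threestudio's registry in the same way this class builds its own base renderer. A rough sketch, where the config values and the pre-built geometry/material/background objects are assumptions rather than values from this record:

renderer = threestudio.find("gan-volume-renderer")(
    {"base_renderer_type": "nerf-volume-renderer", "base_renderer": {}},  # assumed config
    geometry=geometry,      # assumed pre-built BaseImplicitGeometry
    material=material,      # assumed pre-built BaseMaterial
    background=background,  # assumed pre-built BaseBackground
)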
jesenzhang/ComfyUI_StreamDiffusion | streamdiffusion/wrapper.py | [
{
"identifier": "StreamDiffusion",
"path": "streamdiffusion/pipeline.py",
"snippet": "class StreamDiffusion:\n def __init__(\n self,\n pipe: StableDiffusionPipeline,\n t_index_list: List[int],\n torch_dtype: torch.dtype = torch.float16,\n width: int = 512,\n height: int = 512,\n do_add_noise: bool = True,\n use_denoising_batch: bool = True,\n frame_buffer_size: int = 1,\n cfg_type: Literal[\"none\", \"full\", \"self\", \"initialize\"] = \"self\",\n ) -> None:\n self.device = pipe.device\n self.dtype = torch_dtype\n self.generator = None\n\n self.height = height\n self.width = width\n\n self.latent_height = int(height // pipe.vae_scale_factor)\n self.latent_width = int(width // pipe.vae_scale_factor)\n\n self.frame_bff_size = frame_buffer_size\n self.denoising_steps_num = len(t_index_list)\n\n self.cfg_type = cfg_type\n\n if use_denoising_batch:\n self.batch_size = self.denoising_steps_num * frame_buffer_size\n if self.cfg_type == \"initialize\":\n self.trt_unet_batch_size = (\n self.denoising_steps_num + 1\n ) * self.frame_bff_size\n elif self.cfg_type == \"full\":\n self.trt_unet_batch_size = (\n 2 * self.denoising_steps_num * self.frame_bff_size\n )\n else:\n self.trt_unet_batch_size = self.denoising_steps_num * frame_buffer_size\n else:\n self.trt_unet_batch_size = self.frame_bff_size\n self.batch_size = frame_buffer_size\n\n self.t_list = t_index_list\n\n self.do_add_noise = do_add_noise\n self.use_denoising_batch = use_denoising_batch\n\n self.similar_image_filter = False\n self.similar_filter = SimilarImageFilter()\n self.prev_image_result = None\n\n self.pipe = pipe\n self.image_processor = VaeImageProcessor(pipe.vae_scale_factor)\n\n self.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)\n self.text_encoder = pipe.text_encoder\n self.unet = pipe.unet\n self.vae = pipe.vae\n\n self.inference_time_ema = 0\n\n def set_sampler_param(self,\n t_index_list: List[int],\n width: int = 512,\n height: int = 512,\n do_add_noise: bool = True,\n use_denoising_batch: bool = True,\n frame_buffer_size: int = 1,\n cfg_type: Literal[\"none\", \"full\", \"self\", \"initialize\"] = \"self\",):\n self.height = height\n self.width = width\n\n self.latent_height = int(height // self.pipe.vae_scale_factor)\n self.latent_width = int(width // self.pipe.vae_scale_factor)\n\n self.frame_bff_size = frame_buffer_size\n \n self.cfg_type = cfg_type\n self.t_list = t_index_list\n \n self.do_add_noise = do_add_noise\n self.use_denoising_batch = use_denoising_batch\n\n self.inference_time_ema = 0\n\n self.denoising_steps_num = len(self.t_list)\n if self.use_denoising_batch:\n self.batch_size = self.denoising_steps_num * self.frame_bff_size\n if self.cfg_type == \"initialize\":\n self.trt_unet_batch_size = (\n self.denoising_steps_num + 1\n ) * self.frame_bff_size\n elif self.cfg_type == \"full\":\n self.trt_unet_batch_size = (\n 2 * self.denoising_steps_num * self.frame_bff_size\n )\n else:\n self.trt_unet_batch_size = self.denoising_steps_num * self.frame_bff_size\n else:\n self.trt_unet_batch_size = self.frame_bff_size\n self.batch_size = self.frame_bff_size\n \n def load_lcm_lora(\n self,\n pretrained_model_name_or_path_or_dict: Union[\n str, Dict[str, torch.Tensor]\n ] = \"latent-consistency/lcm-lora-sdv1-5\",\n adapter_name: Optional[Any] = None,\n **kwargs,\n ) -> None:\n self.pipe.load_lora_weights(\n pretrained_model_name_or_path_or_dict, adapter_name, **kwargs\n )\n\n def load_lora(\n self,\n pretrained_lora_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],\n adapter_name: Optional[Any] = None,\n **kwargs,\n ) -> None:\n self.pipe.load_lora_weights(\n 
pretrained_lora_model_name_or_path_or_dict, adapter_name, **kwargs\n )\n\n def fuse_lora(\n self,\n fuse_unet: bool = True,\n fuse_text_encoder: bool = True,\n lora_scale: float = 1.0,\n safe_fusing: bool = False,\n ) -> None:\n self.pipe.fuse_lora(\n fuse_unet=fuse_unet,\n fuse_text_encoder=fuse_text_encoder,\n lora_scale=lora_scale,\n safe_fusing=safe_fusing,\n )\n\n def enable_similar_image_filter(self, threshold: float = 0.98, max_skip_frame: float = 10) -> None:\n self.similar_image_filter = True\n self.similar_filter.set_threshold(threshold)\n self.similar_filter.set_max_skip_frame(max_skip_frame)\n\n def disable_similar_image_filter(self) -> None:\n self.similar_image_filter = False\n\n @torch.no_grad()\n def prepare(\n self,\n prompt: str,\n negative_prompt: str = \"\",\n num_inference_steps: int = 50,\n guidance_scale: float = 1.2,\n delta: float = 1.0,\n generator: Optional[torch.Generator] = torch.Generator(),\n seed: int = 2,\n ) -> None:\n self.generator = generator\n self.generator.manual_seed(seed)\n # initialize x_t_latent (it can be any random tensor)\n if self.denoising_steps_num > 1:\n self.x_t_latent_buffer = torch.zeros(\n (\n (self.denoising_steps_num - 1) * self.frame_bff_size,\n 4,\n self.latent_height,\n self.latent_width,\n ),\n dtype=self.dtype,\n device=self.device,\n )\n else:\n self.x_t_latent_buffer = None\n\n if self.cfg_type == \"none\":\n self.guidance_scale = 1.0\n else:\n self.guidance_scale = guidance_scale\n self.delta = delta\n\n do_classifier_free_guidance = False\n if self.guidance_scale > 1.0:\n do_classifier_free_guidance = True\n\n encoder_output = self.pipe.encode_prompt(\n prompt=prompt,\n device=self.device,\n num_images_per_prompt=1,\n do_classifier_free_guidance=do_classifier_free_guidance,\n negative_prompt=negative_prompt,\n )\n self.prompt_embeds = encoder_output[0].repeat(self.batch_size, 1, 1)\n\n if self.use_denoising_batch and self.cfg_type == \"full\":\n uncond_prompt_embeds = encoder_output[1].repeat(self.batch_size, 1, 1)\n elif self.cfg_type == \"initialize\":\n uncond_prompt_embeds = encoder_output[1].repeat(self.frame_bff_size, 1, 1)\n\n if self.guidance_scale > 1.0 and (\n self.cfg_type == \"initialize\" or self.cfg_type == \"full\"\n ):\n self.prompt_embeds = torch.cat(\n [uncond_prompt_embeds, self.prompt_embeds], dim=0\n )\n\n self.scheduler.set_timesteps(num_inference_steps, self.device)\n self.timesteps = self.scheduler.timesteps.to(self.device)\n\n # make sub timesteps list based on the indices in the t_list list and the values in the timesteps list\n self.sub_timesteps = []\n for t in self.t_list:\n self.sub_timesteps.append(self.timesteps[t])\n\n sub_timesteps_tensor = torch.tensor(\n self.sub_timesteps, dtype=torch.long, device=self.device\n )\n self.sub_timesteps_tensor = torch.repeat_interleave(\n sub_timesteps_tensor,\n repeats=self.frame_bff_size if self.use_denoising_batch else 1,\n dim=0,\n )\n\n self.init_noise = torch.randn(\n (self.batch_size, 4, self.latent_height, self.latent_width),\n generator=generator,\n ).to(device=self.device, dtype=self.dtype)\n\n self.stock_noise = torch.zeros_like(self.init_noise)\n\n c_skip_list = []\n c_out_list = []\n for timestep in self.sub_timesteps:\n c_skip, c_out = self.scheduler.get_scalings_for_boundary_condition_discrete(\n timestep\n )\n c_skip_list.append(c_skip)\n c_out_list.append(c_out)\n\n self.c_skip = (\n torch.stack(c_skip_list)\n .view(len(self.t_list), 1, 1, 1)\n .to(dtype=self.dtype, device=self.device)\n )\n self.c_out = (\n torch.stack(c_out_list)\n 
.view(len(self.t_list), 1, 1, 1)\n .to(dtype=self.dtype, device=self.device)\n )\n\n alpha_prod_t_sqrt_list = []\n beta_prod_t_sqrt_list = []\n for timestep in self.sub_timesteps:\n alpha_prod_t_sqrt = self.scheduler.alphas_cumprod[timestep].sqrt()\n beta_prod_t_sqrt = (1 - self.scheduler.alphas_cumprod[timestep]).sqrt()\n alpha_prod_t_sqrt_list.append(alpha_prod_t_sqrt)\n beta_prod_t_sqrt_list.append(beta_prod_t_sqrt)\n alpha_prod_t_sqrt = (\n torch.stack(alpha_prod_t_sqrt_list)\n .view(len(self.t_list), 1, 1, 1)\n .to(dtype=self.dtype, device=self.device)\n )\n beta_prod_t_sqrt = (\n torch.stack(beta_prod_t_sqrt_list)\n .view(len(self.t_list), 1, 1, 1)\n .to(dtype=self.dtype, device=self.device)\n )\n self.alpha_prod_t_sqrt = torch.repeat_interleave(\n alpha_prod_t_sqrt,\n repeats=self.frame_bff_size if self.use_denoising_batch else 1,\n dim=0,\n )\n self.beta_prod_t_sqrt = torch.repeat_interleave(\n beta_prod_t_sqrt,\n repeats=self.frame_bff_size if self.use_denoising_batch else 1,\n dim=0,\n )\n\n @torch.no_grad()\n def update_prompt(self, prompt: str,negative_prompt: Optional[str] = None) -> None:\n encoder_output = self.pipe.encode_prompt(\n prompt=prompt,\n negative_prompt=negative_prompt,\n device=self.device,\n num_images_per_prompt=1,\n do_classifier_free_guidance=False,\n )\n self.prompt_embeds = encoder_output[0].repeat(self.batch_size, 1, 1)\n\n def add_noise(\n self,\n original_samples: torch.Tensor,\n noise: torch.Tensor,\n t_index: int,\n ) -> torch.Tensor:\n noisy_samples = (\n self.alpha_prod_t_sqrt[t_index] * original_samples\n + self.beta_prod_t_sqrt[t_index] * noise\n )\n return noisy_samples\n\n def scheduler_step_batch(\n self,\n model_pred_batch: torch.Tensor,\n x_t_latent_batch: torch.Tensor,\n idx: Optional[int] = None,\n ) -> torch.Tensor:\n # TODO: use t_list to select beta_prod_t_sqrt\n if idx is None:\n F_theta = (\n x_t_latent_batch - self.beta_prod_t_sqrt * model_pred_batch\n ) / self.alpha_prod_t_sqrt\n denoised_batch = self.c_out * F_theta + self.c_skip * x_t_latent_batch\n else:\n F_theta = (\n x_t_latent_batch - self.beta_prod_t_sqrt[idx] * model_pred_batch\n ) / self.alpha_prod_t_sqrt[idx]\n denoised_batch = (\n self.c_out[idx] * F_theta + self.c_skip[idx] * x_t_latent_batch\n )\n\n return denoised_batch\n\n def unet_step(\n self,\n x_t_latent: torch.Tensor,\n t_list: Union[torch.Tensor, list[int]],\n idx: Optional[int] = None,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.guidance_scale > 1.0 and (self.cfg_type == \"initialize\"):\n x_t_latent_plus_uc = torch.concat([x_t_latent[0:1], x_t_latent], dim=0)\n t_list = torch.concat([t_list[0:1], t_list], dim=0)\n elif self.guidance_scale > 1.0 and (self.cfg_type == \"full\"):\n x_t_latent_plus_uc = torch.concat([x_t_latent, x_t_latent], dim=0)\n t_list = torch.concat([t_list, t_list], dim=0)\n else:\n x_t_latent_plus_uc = x_t_latent\n\n model_pred = self.unet(\n x_t_latent_plus_uc,\n t_list,\n encoder_hidden_states=self.prompt_embeds,\n return_dict=False,\n )[0]\n\n if self.guidance_scale > 1.0 and (self.cfg_type == \"initialize\"):\n noise_pred_text = model_pred[1:]\n self.stock_noise = torch.concat(\n [model_pred[0:1], self.stock_noise[1:]], dim=0\n ) # ここコメントアウトでself out cfg\n elif self.guidance_scale > 1.0 and (self.cfg_type == \"full\"):\n noise_pred_uncond, noise_pred_text = model_pred.chunk(2)\n else:\n noise_pred_text = model_pred\n if self.guidance_scale > 1.0 and (\n self.cfg_type == \"self\" or self.cfg_type == \"initialize\"\n ):\n noise_pred_uncond = self.stock_noise * self.delta\n if 
self.guidance_scale > 1.0 and self.cfg_type != \"none\":\n model_pred = noise_pred_uncond + self.guidance_scale * (\n noise_pred_text - noise_pred_uncond\n )\n else:\n model_pred = noise_pred_text\n\n # compute the previous noisy sample x_t -> x_t-1\n if self.use_denoising_batch:\n denoised_batch = self.scheduler_step_batch(model_pred, x_t_latent, idx)\n if self.cfg_type == \"self\" or self.cfg_type == \"initialize\":\n scaled_noise = self.beta_prod_t_sqrt * self.stock_noise\n delta_x = self.scheduler_step_batch(model_pred, scaled_noise, idx)\n alpha_next = torch.concat(\n [\n self.alpha_prod_t_sqrt[1:],\n torch.ones_like(self.alpha_prod_t_sqrt[0:1]),\n ],\n dim=0,\n )\n delta_x = alpha_next * delta_x\n beta_next = torch.concat(\n [\n self.beta_prod_t_sqrt[1:],\n torch.ones_like(self.beta_prod_t_sqrt[0:1]),\n ],\n dim=0,\n )\n delta_x = delta_x / beta_next\n init_noise = torch.concat(\n [self.init_noise[1:], self.init_noise[0:1]], dim=0\n )\n self.stock_noise = init_noise + delta_x\n\n else:\n # denoised_batch = self.scheduler.step(model_pred, t_list[0], x_t_latent).denoised\n denoised_batch = self.scheduler_step_batch(model_pred, x_t_latent, idx)\n\n return denoised_batch, model_pred\n\n def encode_image(self, image_tensors: torch.Tensor) -> torch.Tensor:\n image_tensors = image_tensors.to(\n device=self.device,\n dtype=self.vae.dtype,\n )\n img_latent = retrieve_latents(self.vae.encode(image_tensors), self.generator)\n img_latent = img_latent * self.vae.config.scaling_factor\n x_t_latent = self.add_noise(img_latent, self.init_noise[0], 0)\n return x_t_latent\n\n def decode_image(self, x_0_pred_out: torch.Tensor) -> torch.Tensor:\n output_latents = self.vae.decode(\n x_0_pred_out / self.vae.config.scaling_factor, return_dict=False\n )\n output_latent =output_latents[0]\n return output_latent\n\n def predict_x0_batch(self, x_t_latent: torch.Tensor) -> torch.Tensor:\n prev_latent_batch = self.x_t_latent_buffer\n\n if self.use_denoising_batch:\n t_list = self.sub_timesteps_tensor\n if self.denoising_steps_num > 1:\n x_t_latent = torch.cat((x_t_latent, prev_latent_batch), dim=0)\n self.stock_noise = torch.cat(\n (self.init_noise[0:1], self.stock_noise[:-1]), dim=0\n )\n x_0_pred_batch, model_pred = self.unet_step(x_t_latent, t_list)\n\n if self.denoising_steps_num > 1:\n x_0_pred_out = x_0_pred_batch[-1].unsqueeze(0)\n if self.do_add_noise:\n self.x_t_latent_buffer = (\n self.alpha_prod_t_sqrt[1:] * x_0_pred_batch[:-1]\n + self.beta_prod_t_sqrt[1:] * self.init_noise[1:]\n )\n else:\n self.x_t_latent_buffer = (\n self.alpha_prod_t_sqrt[1:] * x_0_pred_batch[:-1]\n )\n else:\n x_0_pred_out = x_0_pred_batch\n self.x_t_latent_buffer = None\n else:\n self.init_noise = x_t_latent\n for idx, t in enumerate(self.sub_timesteps_tensor):\n t = t.view(\n 1,\n ).repeat(\n self.frame_bff_size,\n )\n x_0_pred, model_pred = self.unet_step(x_t_latent, t, idx)\n if idx < len(self.sub_timesteps_tensor) - 1:\n if self.do_add_noise:\n x_t_latent = self.alpha_prod_t_sqrt[\n idx + 1\n ] * x_0_pred + self.beta_prod_t_sqrt[\n idx + 1\n ] * torch.randn_like(\n x_0_pred, device=self.device, dtype=self.dtype\n )\n else:\n x_t_latent = self.alpha_prod_t_sqrt[idx + 1] * x_0_pred\n x_0_pred_out = x_0_pred\n\n return x_0_pred_out\n\n @torch.no_grad()\n def __call__(\n self, x: Union[torch.Tensor, PIL.Image.Image, np.ndarray] = None\n ) -> torch.Tensor:\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n start.record()\n if x is not None:\n x = self.image_processor.preprocess(x, 
self.height, self.width).to(\n device=self.device, dtype=self.dtype\n )\n if self.similar_image_filter:\n x = self.similar_filter(x)\n if x is None:\n time.sleep(self.inference_time_ema)\n return self.prev_image_result\n x_t_latent = self.encode_image(x)\n else:\n # TODO: check the dimension of x_t_latent\n x_t_latent = torch.randn((1, 4, self.latent_height, self.latent_width)).to(\n device=self.device, dtype=self.dtype\n )\n x_0_pred_out = self.predict_x0_batch(x_t_latent)\n x_output = self.decode_image(x_0_pred_out).detach().clone()\n\n self.prev_image_result = x_output\n end.record()\n torch.cuda.synchronize()\n inference_time = start.elapsed_time(end) / 1000\n self.inference_time_ema = 0.9 * self.inference_time_ema + 0.1 * inference_time\n return x_output\n \n @torch.no_grad()\n def sample(\n self, x: Union[torch.Tensor, PIL.Image.Image, np.ndarray] = None\n ) -> torch.Tensor:\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n start.record()\n if x is not None:\n x = self.image_processor.preprocess(x, self.height, self.width).to(\n device=self.device, dtype=self.dtype\n )\n if self.similar_image_filter:\n x = self.similar_filter(x)\n if x is None:\n time.sleep(self.inference_time_ema)\n return self.prev_image_result\n x_t_latent = self.encode_image(x)\n b,c,h,w=x_t_latent.shape\n \n # x_t_latent=x_t_latent.repeat((2, 1,1,1))\n else:\n # TODO: check the dimension of x_t_latent\n x_t_latent = torch.randn((self.frame_bff_size, 4, self.latent_height, self.latent_width)).to(\n device=self.device, dtype=self.dtype\n )\n x_0_pred_out = self.predict_x0_batch(x_t_latent)\n x_output = self.decode_image(x_0_pred_out).detach().clone()\n self.prev_image_result = x_output\n end.record()\n torch.cuda.synchronize()\n inference_time = start.elapsed_time(end) / 1000\n self.inference_time_ema = 0.9 * self.inference_time_ema + 0.1 * inference_time\n return x_output\n \n\n @torch.no_grad()\n def txt2img(self, batch_size: int = 1) -> torch.Tensor:\n x_0_pred_out = self.predict_x0_batch(\n torch.randn((batch_size, 4, self.latent_height, self.latent_width)).to(\n device=self.device, dtype=self.dtype\n )\n )\n x_output = self.decode_image(x_0_pred_out).detach().clone()\n return x_output\n\n def txt2img_sd_turbo(self, batch_size: int = 1) -> torch.Tensor:\n x_t_latent = torch.randn(\n (batch_size, 4, self.latent_height, self.latent_width),\n device=self.device,\n dtype=self.dtype,\n )\n model_pred = self.unet(\n x_t_latent,\n self.sub_timesteps_tensor,\n encoder_hidden_states=self.prompt_embeds,\n return_dict=False,\n )[0]\n x_0_pred_out = (\n x_t_latent - self.beta_prod_t_sqrt * model_pred\n ) / self.alpha_prod_t_sqrt\n return self.decode_image(x_0_pred_out)"
},
{
"identifier": "postprocess_image",
"path": "streamdiffusion/image_utils.py",
"snippet": "def postprocess_image(\n image: torch.Tensor,\n output_type: str = \"pil\",\n do_denormalize: Optional[List[bool]] = None,\n) -> Union[torch.Tensor, np.ndarray, PIL.Image.Image]:\n if not isinstance(image, torch.Tensor):\n raise ValueError(\n f\"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor\"\n )\n\n if output_type == \"latent\":\n return image\n\n do_normalize_flg = True\n if do_denormalize is None:\n do_denormalize = [do_normalize_flg] * image.shape[0]\n\n image = torch.stack(\n [\n denormalize(image[i]) if do_denormalize[i] else image[i]\n for i in range(image.shape[0])\n ]\n )\n\n if output_type == \"pt\":\n return image\n\n image = pt_to_numpy(image)\n\n if output_type == \"np\":\n return image\n\n if output_type == \"pil\":\n return numpy_to_pil(image)"
}
] | import gc
import os
import traceback
import numpy as np
import torch
from pathlib import Path
from typing import List, Literal, Optional, Union, Dict
from diffusers import AutoencoderTiny, StableDiffusionPipeline
from PIL import Image
from .pipeline import StreamDiffusion
from .image_utils import postprocess_image
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from polygraphy import cuda
from streamdiffusion.acceleration.tensorrt import (
TorchVAEEncoder,
compile_unet,
compile_vae_decoder,
compile_vae_encoder,
)
from streamdiffusion.acceleration.tensorrt.engine import (
AutoencoderKLEngine,
UNet2DConditionModelEngine,
)
from streamdiffusion.acceleration.tensorrt.models import (
VAE,
UNet,
VAEEncoder,
)
from streamdiffusion.acceleration.sfast import (
accelerate_with_stable_fast,
)
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
) | 7,470 | width: int = 512,
height: int = 512,
warmup: int = 10,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
do_add_noise: bool = True,
device_ids: Optional[List[int]] = None,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
enable_similar_image_filter: bool = False,
similar_image_filter_threshold: float = 0.98,
similar_image_filter_max_skip_frame: int = 10,
use_denoising_batch: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
use_safety_checker: bool = False,
):
"""
Initializes the StreamDiffusionWrapper.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
mode : Literal["img2img", "txt2img"], optional
txt2img or img2img, by default "img2img".
output_type : Literal["pil", "pt", "np", "latent"], optional
The output type of image, by default "pil".
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
If None, the default LCM-LoRA
("latent-consistency/lcm-lora-sdv1-5") will be used.
vae_id : Optional[str], optional
The vae_id to load, by default None.
If None, the default TinyVAE
("madebyollin/taesd") will be used.
device : Literal["cpu", "cuda"], optional
The device to use for inference, by default "cuda".
dtype : torch.dtype, optional
The dtype for inference, by default torch.float16.
frame_buffer_size : int, optional
The frame buffer size for denoising batch, by default 1.
width : int, optional
The width of the image, by default 512.
height : int, optional
The height of the image, by default 512.
warmup : int, optional
The number of warmup steps to perform, by default 10.
acceleration : Literal["none", "xformers", "tensorrt"], optional
The acceleration method, by default "tensorrt".
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
device_ids : Optional[List[int]], optional
The device ids to use for DataParallel, by default None.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
enable_similar_image_filter : bool, optional
Whether to enable similar image filter or not,
by default False.
similar_image_filter_threshold : float, optional
The threshold for similar image filter, by default 0.98.
similar_image_filter_max_skip_frame : int, optional
The max skip frame for similar image filter, by default 10.
use_denoising_batch : bool, optional
Whether to use denoising batch or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
seed : int, optional
The seed, by default 2.
use_safety_checker : bool, optional
Whether to use safety checker or not, by default False.
"""
self.sd_turbo = "turbo" in model_id_or_path
if mode == "txt2img":
if cfg_type != "none":
raise ValueError(
f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
)
if use_denoising_batch and frame_buffer_size > 1:
if not self.sd_turbo:
raise ValueError(
"txt2img mode cannot use denoising batch with frame_buffer_size > 1."
)
if mode == "img2img":
if not use_denoising_batch:
raise NotImplementedError(
"img2img mode must use denoising batch for now."
)
self.device = device
self.dtype = dtype
self.width = width
self.height = height
self.mode = mode
self.output_type = output_type
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
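        # Worked example for the expression above (illustrative values, not from the
        # original file): with t_index_list=[0, 16, 32, 45] and frame_buffer_size=1,
        # denoising-batch mode gives batch_size = 4 * 1 = 4; otherwise it stays at 1.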
        self.t_index_list = t_index_list
        self.cfg_type = cfg_type
        self.use_denoising_batch = use_denoising_batch
        self.use_safety_checker = use_safety_checker
        self.do_add_noise = do_add_noise
        self.seed = seed
|
torch.set_grad_enabled(False)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
class StreamDiffusionWrapper:
def __init__(
self,
model_id_or_path: str,
t_index_list: List[int],
lora_dict: Optional[Dict[str, float]] = None,
mode: Literal["img2img", "txt2img"] = "img2img",
output_type: Literal["pil", "pt", "np", "latent"] = "pil",
lcm_lora_id: Optional[str] = None,
vae_id: Optional[str] = None,
device: Literal["cpu", "cuda"] = "cuda",
dtype: torch.dtype = torch.float16,
frame_buffer_size: int = 1,
width: int = 512,
height: int = 512,
warmup: int = 10,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
do_add_noise: bool = True,
device_ids: Optional[List[int]] = None,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
enable_similar_image_filter: bool = False,
similar_image_filter_threshold: float = 0.98,
similar_image_filter_max_skip_frame: int = 10,
use_denoising_batch: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
use_safety_checker: bool = False,
):
"""
Initializes the StreamDiffusionWrapper.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
mode : Literal["img2img", "txt2img"], optional
txt2img or img2img, by default "img2img".
output_type : Literal["pil", "pt", "np", "latent"], optional
The output type of image, by default "pil".
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
If None, the default LCM-LoRA
("latent-consistency/lcm-lora-sdv1-5") will be used.
vae_id : Optional[str], optional
The vae_id to load, by default None.
If None, the default TinyVAE
("madebyollin/taesd") will be used.
device : Literal["cpu", "cuda"], optional
The device to use for inference, by default "cuda".
dtype : torch.dtype, optional
The dtype for inference, by default torch.float16.
frame_buffer_size : int, optional
The frame buffer size for denoising batch, by default 1.
width : int, optional
The width of the image, by default 512.
height : int, optional
The height of the image, by default 512.
warmup : int, optional
The number of warmup steps to perform, by default 10.
acceleration : Literal["none", "xformers", "tensorrt"], optional
The acceleration method, by default "tensorrt".
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
device_ids : Optional[List[int]], optional
The device ids to use for DataParallel, by default None.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
enable_similar_image_filter : bool, optional
Whether to enable similar image filter or not,
by default False.
similar_image_filter_threshold : float, optional
The threshold for similar image filter, by default 0.98.
similar_image_filter_max_skip_frame : int, optional
The max skip frame for similar image filter, by default 10.
use_denoising_batch : bool, optional
Whether to use denoising batch or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
seed : int, optional
The seed, by default 2.
use_safety_checker : bool, optional
Whether to use safety checker or not, by default False.
"""
self.sd_turbo = "turbo" in model_id_or_path
if mode == "txt2img":
if cfg_type != "none":
raise ValueError(
f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
)
if use_denoising_batch and frame_buffer_size > 1:
if not self.sd_turbo:
raise ValueError(
"txt2img mode cannot use denoising batch with frame_buffer_size > 1."
)
if mode == "img2img":
if not use_denoising_batch:
raise NotImplementedError(
"img2img mode must use denoising batch for now."
)
self.device = device
self.dtype = dtype
self.width = width
self.height = height
self.mode = mode
self.output_type = output_type
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
        self.t_index_list = t_index_list
        self.cfg_type = cfg_type
        self.use_denoising_batch = use_denoising_batch
        self.use_safety_checker = use_safety_checker
        self.do_add_noise = do_add_noise
        self.seed = seed
| self.stream: StreamDiffusion = self._load_model( | 0 | 2023-12-29 09:00:03+00:00 | 12k |
neobundy/MLX-Stable-Diffusion-WebUI | stable_diffusion/model_io.py | [
{
"identifier": "CLIPTextModel",
"path": "stable_diffusion/clip.py",
"snippet": "class CLIPTextModel(nn.Module):\n \"\"\"Implements the text encoder transformer from CLIP.\"\"\"\n\n def __init__(self, config: CLIPTextModelConfig):\n super().__init__()\n\n self.token_embedding = nn.Embedding(config.vocab_size, config.model_dims)\n self.position_embedding = nn.Embedding(config.max_length, config.model_dims)\n self.layers = [\n CLIPEncoderLayer(config.model_dims, config.num_heads)\n for i in range(config.num_layers)\n ]\n self.final_layer_norm = nn.LayerNorm(config.model_dims)\n\n def __call__(self, x):\n # Extract some shapes\n B, N = x.shape\n\n # Compute the embeddings\n x = self.token_embedding(x)\n x = x + self.position_embedding.weight[:N]\n\n # Compute the features from the transformer\n mask = nn.MultiHeadAttention.create_additive_causal_mask(N, x.dtype)\n for l in self.layers:\n x = l(x, mask)\n\n # Apply the final layernorm and return\n return self.final_layer_norm(x)"
},
{
"identifier": "AutoencoderConfig",
"path": "stable_diffusion/config.py",
"snippet": "class AutoencoderConfig(BaseConfig):\n in_channels: int = 3\n out_channels: int = 3\n latent_channels_out: int = 8\n latent_channels_in: int = 4\n block_out_channels: Tuple[int] = (128, 256, 512, 512)\n layers_per_block: int = 2\n norm_num_groups: int = 32\n scaling_factor: float = 0.18215"
},
{
"identifier": "CLIPTextModelConfig",
"path": "stable_diffusion/config.py",
"snippet": "class CLIPTextModelConfig(BaseConfig):\n num_layers: int = 23\n model_dims: int = 1024\n num_heads: int = 16\n max_length: int = 77\n vocab_size: int = 49408"
},
{
"identifier": "DiffusionConfig",
"path": "stable_diffusion/config.py",
"snippet": "class DiffusionConfig(BaseConfig):\n beta_schedule: str = \"scaled_linear\"\n beta_start: float = 0.00085\n beta_end: float = 0.012\n num_train_steps: int = 1000"
},
{
"identifier": "UNetConfig",
"path": "stable_diffusion/config.py",
"snippet": "class UNetConfig(BaseConfig):\n in_channels: int = 4\n out_channels: int = 4\n conv_in_kernel: int = 3\n conv_out_kernel: int = 3\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280)\n layers_per_block: Tuple[int] = (2, 2, 2, 2)\n mid_block_layers: int = 2\n transformer_layers_per_block: Tuple[int] = (1, 1, 1, 1)\n num_attention_heads: Tuple[int] = (5, 10, 20, 20)\n cross_attention_dim: Tuple[int] = (1024,) * 4\n norm_num_groups: int = 32"
},
{
"identifier": "Tokenizer",
"path": "stable_diffusion/tokenizer.py",
"snippet": "class Tokenizer:\n \"\"\"A simple port of CLIPTokenizer from https://github.com/huggingface/transformers/ .\"\"\"\n\n def __init__(self, bpe_ranks, vocab):\n self.bpe_ranks = bpe_ranks\n self.vocab = vocab\n self.pat = regex.compile(\n r\"\"\"<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\",\n regex.IGNORECASE,\n )\n\n self._cache = {self.bos: self.bos, self.eos: self.eos}\n\n @property\n def bos(self):\n return \"<|startoftext|>\"\n\n @property\n def bos_token(self):\n return self.vocab[self.bos]\n\n @property\n def eos(self):\n return \"<|endoftext|>\"\n\n @property\n def eos_token(self):\n return self.vocab[self.eos]\n\n def bpe(self, text):\n if text in self._cache:\n return self._cache[text]\n\n unigrams = list(text[:-1]) + [text[-1] + \"</w>\"]\n unique_bigrams = set(zip(unigrams, unigrams[1:]))\n\n if not unique_bigrams:\n return unigrams\n\n # In every iteration try to merge the two most likely bigrams. If none\n # was merged we are done.\n #\n # Ported from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_clip.py\n while unique_bigrams:\n bigram = min(\n unique_bigrams, key=lambda pair: self.bpe_ranks.get(pair, float(\"inf\"))\n )\n if bigram not in self.bpe_ranks:\n break\n\n new_unigrams = []\n skip = False\n for a, b in zip(unigrams, unigrams[1:]):\n if skip:\n skip = False\n continue\n\n if (a, b) == bigram:\n new_unigrams.append(a + b)\n skip = True\n\n else:\n new_unigrams.append(a)\n\n if not skip:\n new_unigrams.append(b)\n\n unigrams = new_unigrams\n unique_bigrams = set(zip(unigrams, unigrams[1:]))\n\n self._cache[text] = unigrams\n\n return unigrams\n\n def tokenize(self, text, prepend_bos=True, append_eos=True):\n if isinstance(text, list):\n return [self.tokenize(t, prepend_bos, append_eos) for t in text]\n\n # Lower case cleanup and split according to self.pat. Hugging Face does\n # a much more thorough job here but this should suffice for 95% of\n # cases.\n clean_text = regex.sub(r\"\\s+\", \" \", text.lower())\n tokens = regex.findall(self.pat, clean_text)\n\n # Split the tokens according to the byte-pair merge file\n bpe_tokens = [ti for t in tokens for ti in self.bpe(t)]\n\n # Map to token ids and return\n tokens = [self.vocab[t] for t in bpe_tokens]\n if prepend_bos:\n tokens = [self.bos_token] + tokens\n if append_eos:\n tokens.append(self.eos_token)\n\n return tokens"
},
{
"identifier": "UNetModel",
"path": "stable_diffusion/unet.py",
"snippet": "class UNetModel(nn.Module):\n \"\"\"The conditional 2D UNet model that actually performs the denoising.\"\"\"\n\n def __init__(self, config: UNetConfig):\n super().__init__()\n\n self.conv_in = nn.Conv2d(\n config.in_channels,\n config.block_out_channels[0],\n config.conv_in_kernel,\n padding=(config.conv_in_kernel - 1) // 2,\n )\n\n # Generate sinusoidal positional encodings.\n # These encodings are used in transformer models to provide information about the position of the elements in the sequence.\n self.timesteps = nn.SinusoidalPositionalEncoding(\n config.block_out_channels[0],\n max_freq=1,\n min_freq=math.exp(\n -math.log(10000) + 2 * math.log(10000) / config.block_out_channels[0]\n ),\n scale=1.0,\n cos_first=True,\n full_turns=False,\n )\n self.time_embedding = TimestepEmbedding(\n config.block_out_channels[0],\n config.block_out_channels[0] * 4,\n )\n\n # Make the downsampling blocks\n block_channels = [config.block_out_channels[0]] + list(\n config.block_out_channels\n )\n self.down_blocks = [\n UNetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=config.block_out_channels[0] * 4,\n num_layers=config.layers_per_block[i],\n transformer_layers_per_block=config.transformer_layers_per_block[i],\n num_attention_heads=config.num_attention_heads[i],\n cross_attention_dim=config.cross_attention_dim[i],\n resnet_groups=config.norm_num_groups,\n add_downsample=(i < len(config.block_out_channels) - 1),\n add_upsample=False,\n add_cross_attention=(i < len(config.block_out_channels) - 1),\n )\n for i, (in_channels, out_channels) in enumerate(\n zip(block_channels, block_channels[1:])\n )\n ]\n\n # Make the middle block\n self.mid_blocks = [\n ResnetBlock2D(\n in_channels=config.block_out_channels[-1],\n out_channels=config.block_out_channels[-1],\n temb_channels=config.block_out_channels[0] * 4,\n groups=config.norm_num_groups,\n ),\n Transformer2D(\n in_channels=config.block_out_channels[-1],\n model_dims=config.block_out_channels[-1],\n num_heads=config.num_attention_heads[-1],\n num_layers=config.transformer_layers_per_block[-1],\n encoder_dims=config.cross_attention_dim[-1],\n ),\n ResnetBlock2D(\n in_channels=config.block_out_channels[-1],\n out_channels=config.block_out_channels[-1],\n temb_channels=config.block_out_channels[0] * 4,\n groups=config.norm_num_groups,\n ),\n ]\n\n # Make the upsampling blocks\n block_channels = (\n [config.block_out_channels[0]]\n + list(config.block_out_channels)\n + [config.block_out_channels[-1]]\n )\n self.up_blocks = [\n UNetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=config.block_out_channels[0] * 4,\n prev_out_channels=prev_out_channels,\n num_layers=config.layers_per_block[i] + 1,\n transformer_layers_per_block=config.transformer_layers_per_block[i],\n num_attention_heads=config.num_attention_heads[i],\n cross_attention_dim=config.cross_attention_dim[i],\n resnet_groups=config.norm_num_groups,\n add_downsample=False,\n add_upsample=(i > 0),\n add_cross_attention=(i < len(config.block_out_channels) - 1),\n )\n for i, (in_channels, out_channels, prev_out_channels) in reversed(\n list(\n enumerate(\n zip(block_channels, block_channels[1:], block_channels[2:])\n )\n )\n )\n ]\n\n self.conv_norm_out = nn.GroupNorm(\n config.norm_num_groups,\n config.block_out_channels[0],\n pytorch_compatible=True,\n )\n self.conv_out = nn.Conv2d(\n config.block_out_channels[0],\n config.out_channels,\n config.conv_out_kernel,\n padding=(config.conv_out_kernel - 1) // 2,\n )\n\n def 
__call__(self, x, timestep, encoder_x, attn_mask=None, encoder_attn_mask=None):\n\n # Get the sinusoidal positional encoding for the given timestep.\n # The self.timesteps object is an instance of the nn.SinusoidalPositionalEncoding class, which generates sinusoidal positional encodings.\n # These encodings are used in transformer models to provide information about the position of the elements in the sequence.\n # The astype(x.dtype) part is ensuring that the positional encoding has the same data type as the input tensor x.\n\n temb = self.timesteps(timestep).astype(x.dtype)\n temb = self.time_embedding(temb)\n\n # Preprocess the input\n x = self.conv_in(x)\n\n # Run the downsampling part of the unet\n residuals = [x]\n for block in self.down_blocks:\n x, res = block(\n x,\n encoder_x=encoder_x,\n temb=temb,\n attn_mask=attn_mask,\n encoder_attn_mask=encoder_attn_mask,\n )\n residuals.extend(res)\n\n # Run the middle part of the unet\n x = self.mid_blocks[0](x, temb)\n x = self.mid_blocks[1](x, encoder_x, attn_mask, encoder_attn_mask)\n x = self.mid_blocks[2](x, temb)\n\n # Run the upsampling part of the unet\n for block in self.up_blocks:\n x, _ = block(\n x,\n encoder_x=encoder_x,\n temb=temb,\n attn_mask=attn_mask,\n encoder_attn_mask=encoder_attn_mask,\n residual_hidden_states=residuals,\n )\n\n # Postprocess the output\n x = self.conv_norm_out(x)\n x = nn.silu(x)\n x = self.conv_out(x)\n\n return x"
},
{
"identifier": "Autoencoder",
"path": "stable_diffusion/vae.py",
"snippet": "class Autoencoder(nn.Module):\n \"\"\"The autoencoder that allows us to perform diffusion in the latent space.\"\"\"\n\n def __init__(self, config: AutoencoderConfig):\n super().__init__()\n\n self.latent_channels = config.latent_channels_in\n self.scaling_factor = config.scaling_factor\n self.encoder = Encoder(\n config.in_channels,\n config.latent_channels_out,\n config.block_out_channels,\n config.layers_per_block,\n resnet_groups=config.norm_num_groups,\n )\n self.decoder = Decoder(\n config.latent_channels_in,\n config.out_channels,\n config.block_out_channels,\n config.layers_per_block + 1,\n resnet_groups=config.norm_num_groups,\n )\n\n self.quant_proj = nn.Linear(\n config.latent_channels_out, config.latent_channels_out\n )\n self.post_quant_proj = nn.Linear(\n config.latent_channels_in, config.latent_channels_in\n )\n\n def encode(self, x):\n x = self.encoder(x)\n\n # This line applies the linear transformation to the tensor x.\n # The purpose of this operation is to transform the features extracted by the encoder into a form suitable for quantization.\n # In this case, the transformation doesn't change the dimensionality of the data (as both input and output dimensions are config.latent_channels_out),\n # but it can still learn to make the data more suitable for the subsequent operations (like splitting into mean and logvar).\n # The term \"projection\" in quant_proj refers to the operation of applying a linear transformation to the data,\n # which can be thought of as \"projecting\" the data onto a different subspace. This is a common operation in machine learning models,\n # and it is used here to transform the data into a form that is suitable for the subsequent operations in the VAE.\n x = self.quant_proj(x)\n\n # two tensors of size (B, C, H, W) where C = latent_channels_in\n mean, logvar = x.split(2, axis=-1)\n mean = mean * self.scaling_factor\n logvar = logvar + 2 * math.log(self.scaling_factor)\n\n return mean, logvar\n\n def decode(self, z):\n z = z / self.scaling_factor\n return self.decoder(self.post_quant_proj(z))\n\n def __call__(self, x, key=None):\n mean, logvar = self.encode(x)\n z = mx.random.normal(mean.shape, key=key) * mx.exp(0.5 * logvar) + mean\n x_hat = self.decode(z)\n\n return dict(x_hat=x_hat, z=z, mean=mean, logvar=logvar)"
},
{
"identifier": "_DEFAULT_MODEL",
"path": "stable_diffusion/models.py",
"snippet": "_DEFAULT_MODEL = _AVAILABLE_MODELS[0]"
},
{
"identifier": "_MODELS",
"path": "stable_diffusion/models.py",
"snippet": "_MODELS = {model: generate_model_dict() for model in _AVAILABLE_MODELS}"
},
{
"identifier": "DiffuserModelPathConfig",
"path": "stable_diffusion/config.py",
"snippet": "class DiffuserModelPathConfig:\n def __init__(self, model_path: str = \"./diffuser_models\"):\n self.model_path = model_path\n\n @property\n def unet_config(self):\n return self.model_path + \"/unet/config.json\"\n\n @property\n def unet(self):\n return self.model_path + \"/unet/diffusion_pytorch_model.safetensors\"\n\n @property\n def scheduler(self):\n return self.model_path + \"/scheduler/scheduler_config.json\"\n\n @property\n def text_encoder_config(self):\n return self.model_path + \"/text_encoder/config.json\"\n\n @property\n def text_encoder(self):\n return self.model_path + \"/text_encoder/model.safetensors\"\n\n @property\n def vae_config(self):\n return self.model_path + \"/vae/config.json\"\n\n @property\n def vae(self):\n return self.model_path + \"/vae/diffusion_pytorch_model.safetensors\"\n\n @property\n def diffusion_config(self):\n return self.model_path + \"/scheduler/scheduler_config.json\"\n\n @property\n def tokenizer_vocab(self):\n return self.model_path + \"/tokenizer/vocab.json\"\n\n @property\n def tokenizer_merges(self):\n return self.model_path + \"/tokenizer/merges.txt\""
}
] | from typing import Optional
from functools import partial
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from safetensors import safe_open as safetensor_open
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
from .models import _DEFAULT_MODEL, _MODELS
from .config import DiffuserModelPathConfig
from tqdm import tqdm
import json
import mlx.core as mx
import numpy as np | 7,581 | def _flatten(params):
return [(k, v) for p in params for (k, v) in p]
# The weights of the model can be loaded as 16-bit floating point numbers, which is a form of quantization known as half-precision floating point.
# This can reduce the memory requirements of the model by half compared to 32-bit floating point numbers, at the cost of reduced numerical precision.
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
dtype = np.float16 if float16 else np.float32
_debug_print(f"Loading weights from {weight_file}")
with safetensor_open(weight_file, framework="numpy") as f:
keys = list(f.keys())
weights = _flatten([mapper(k, f.get_tensor(k).astype(dtype)) for k in tqdm(keys, desc=f"Loading weights from {weight_file}...")])
model.update(tree_unflatten(weights))
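# Quick, illustrative check of the half-precision note above (not part of the original
# file; the shape and the _demo_* names are made up, numpy is the file's `np` import):
# casting weights to float16 halves their memory footprint.
_demo_w32 = np.zeros((4, 4), dtype=np.float32)
_demo_w16 = _demo_w32.astype(np.float16)
assert _demo_w16.nbytes * 2 == _demo_w32.nbytes  # 32 bytes vs. 64 bytes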
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion UNet from Hugging Face Hub."""
_check_key(key, "load_unet")
# Download the config and create the model
unet_config = _MODELS[key]["unet_config"]
with open(hf_hub_download(key, unet_config)) as f:
config = json.load(f)
n_blocks = len(config["block_out_channels"])
model = UNetModel(
UNetConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=[config["layers_per_block"]] * n_blocks,
num_attention_heads=[config["attention_head_dim"]] * n_blocks
if isinstance(config["attention_head_dim"], int)
else config["attention_head_dim"],
cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
norm_num_groups=config["norm_num_groups"],
)
)
# Download the weights and map them into the model
unet_weights = _MODELS[key]["unet"]
weight_file = hf_hub_download(key, unet_weights)
_load_safetensor_weights(map_unet_weights, model, weight_file, float16)
return model
def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion text encoder from Hugging Face Hub."""
_check_key(key, "load_text_encoder")
# Download the config and create the model
text_encoder_config = _MODELS[key]["text_encoder_config"]
with open(hf_hub_download(key, text_encoder_config)) as f:
config = json.load(f)
model = CLIPTextModel(
CLIPTextModelConfig(
num_layers=config["num_hidden_layers"],
model_dims=config["hidden_size"],
num_heads=config["num_attention_heads"],
max_length=config["max_position_embeddings"],
vocab_size=config["vocab_size"],
)
)
# Download the weights and map them into the model
text_encoder_weights = _MODELS[key]["text_encoder"]
weight_file = hf_hub_download(key, text_encoder_weights)
_load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
return model
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion autoencoder from Hugging Face Hub."""
_check_key(key, "load_autoencoder")
# Download the config and create the model
vae_config = _MODELS[key]["vae_config"]
with open(hf_hub_download(key, vae_config)) as f:
config = json.load(f)
model = Autoencoder(
AutoencoderConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
latent_channels_out=2 * config["latent_channels"],
latent_channels_in=config["latent_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=config["layers_per_block"],
norm_num_groups=config["norm_num_groups"],
)
)
# Download the weights and map them into the model
vae_weights = _MODELS[key]["vae"]
weight_file = hf_hub_download(key, vae_weights)
_load_safetensor_weights(map_vae_weights, model, weight_file, float16)
return model
def load_diffusion_config(key: str = _DEFAULT_MODEL):
"""Load the stable diffusion config from Hugging Face Hub."""
_check_key(key, "load_diffusion_config")
diffusion_config = _MODELS[key]["diffusion_config"]
with open(hf_hub_download(key, diffusion_config)) as f:
config = json.load(f)
| # Copyright © 2023 Apple Inc.
logfile = 'log.txt'
_DEBUG = False
def _debug_print(*args, **kwargs):
if _DEBUG:
# Convert the arguments to a string
message = ' '.join(map(str, args))
# Print the message to the console
print(message, **kwargs)
# Open the log file in append mode and write the message
with open(logfile, 'a') as f:
f.write(message + '\n')
def _from_numpy(x):
return mx.array(np.ascontiguousarray(x))
# The `map_*_weights` functions are used to adjust the weights of a model when loading it from a file.
# The weights of the model in the file might be in a different format than the weights of the model in the current codebase.
# When you load a pre-trained model, the weights are stored in a dictionary where the keys are the names of the parameters in the model.
# If the architecture of your model is different from the architecture of the model that the weights were trained on, you might need to adjust the keys and/or the weights to match your model's architecture.
# This is what the `map_*_weights` functions are doing. They are adjusting the keys and the weights to match the architecture of the models in the current codebase.
def map_unet_weights(key, value):
# Map up/downsampling
if "downsamplers" in key:
key = key.replace("downsamplers.0.conv", "downsample")
_debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}")
if "upsamplers" in key:
key = key.replace("upsamplers.0.conv", "upsample")
_debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}")
# Map the mid block
if "mid_block.resnets.0" in key:
key = key.replace("mid_block.resnets.0", "mid_blocks.0")
_debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
_debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
_debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}")
# Map attention layers
if "to_k" in key:
key = key.replace("to_k", "key_proj")
_debug_print(f"Replaced 'to_k' with 'key_proj' in {key}")
if "to_out.0" in key:
key = key.replace("to_out.0", "out_proj")
_debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}")
if "to_q" in key:
key = key.replace("to_q", "query_proj")
_debug_print(f"Replaced 'to_q' with 'query_proj' in {key}")
if "to_v" in key:
key = key.replace("to_v", "value_proj")
_debug_print(f"Replaced 'to_v' with 'value_proj' in {key}")
# Map transformer ffn
if "ff.net.2" in key:
key = key.replace("ff.net.2", "linear3")
_debug_print(f"Replaced 'ff.net.2' with 'linear3' in {key}")
if "ff.net.0" in key:
k1 = key.replace("ff.net.0.proj", "linear1")
k2 = key.replace("ff.net.0.proj", "linear2")
v1, v2 = np.split(value, 2)
_debug_print(f"Replaced 'ff.net.0.proj' with 'linear1' and 'linear2' in {key}")
return [(k1, _from_numpy(v1)), (k2, _from_numpy(v2))]
# The weights of this 1x1 convolutional layer would be a 4-dimensional tensor
# with shape [out_channels, in_channels, 1, 1].
# The squeeze() function is used to remove the dimensions of size 1 from this tensor,
# converting it to a 2-dimensional tensor with shape [out_channels, in_channels].
# This is because the corresponding layer in the current model might be a linear layer
# rather than a convolutional layer, and the weights for a linear layer are expected to be a 2-dimensional tensor.
if "conv_shortcut.weight" in key:
value = value.squeeze()
_debug_print(f"Squeezed 'conv_shortcut.weight' in {key}")
# Transform the weights from 1x1 convs to linear
if len(value.shape) == 4 and ("proj_in" in key or "proj_out" in key):
value = value.squeeze()
_debug_print(f"Squeezed 'proj_in' or 'proj_out' in {key}")
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
_debug_print(f"Transposed dimensions in {key}")
return [(key, _from_numpy(value))]
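# Small demonstration of the 1x1-conv-to-linear squeeze described above (illustrative
# only; the shape and the _demo_conv1x1 name are made up): squeeze() drops the two
# singleton kernel dimensions so the weight matches a Linear layer's
# [out_features, in_features] layout.
_demo_conv1x1 = np.zeros((8, 4, 1, 1), dtype=np.float32)
assert _demo_conv1x1.squeeze().shape == (8, 4)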
def map_clip_text_encoder_weights(key, value):
# Remove prefixes
if key.startswith("text_model."):
key = key[11:]
_debug_print(f"Removed 'text_model.' prefix from {key}")
if key.startswith("embeddings."):
key = key[11:]
_debug_print(f"Removed 'embeddings.' prefix from {key}")
if key.startswith("encoder."):
key = key[8:]
_debug_print(f"Removed 'encoder.' prefix from {key}")
# Map attention layers
if "self_attn." in key:
key = key.replace("self_attn.", "attention.")
_debug_print(f"Replaced 'self_attn.' with 'attention.' in {key}")
if "q_proj." in key:
key = key.replace("q_proj.", "query_proj.")
_debug_print(f"Replaced 'q_proj.' with 'query_proj.' in {key}")
if "k_proj." in key:
key = key.replace("k_proj.", "key_proj.")
_debug_print(f"Replaced 'k_proj.' with 'key_proj.' in {key}")
if "v_proj." in key:
key = key.replace("v_proj.", "value_proj.")
_debug_print(f"Replaced 'v_proj.' with 'value_proj.' in {key}")
# Map ffn layers
if "mlp.fc1" in key:
key = key.replace("mlp.fc1", "linear1")
_debug_print(f"Replaced 'mlp.fc1' with 'linear1' in {key}")
if "mlp.fc2" in key:
key = key.replace("mlp.fc2", "linear2")
_debug_print(f"Replaced 'mlp.fc2' with 'linear2' in {key}")
return [(key, _from_numpy(value))]
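# Illustrative trace of the renaming above (example key only, not taken from a real
# checkpoint): "text_model.encoder.layers.0.self_attn.q_proj.weight" first loses the
# "text_model." and "encoder." prefixes, then the module renames apply, giving
# "layers.0.attention.query_proj.weight".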
def map_vae_weights(key, value):
# Map up/downsampling
if "downsamplers" in key:
key = key.replace("downsamplers.0.conv", "downsample")
_debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}")
if "upsamplers" in key:
key = key.replace("upsamplers.0.conv", "upsample")
_debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}")
# Map attention layers
if "to_k" in key:
key = key.replace("to_k", "key_proj")
_debug_print(f"Replaced 'to_k' with 'key_proj' in {key}")
if "to_out.0" in key:
key = key.replace("to_out.0", "out_proj")
_debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}")
if "to_q" in key:
key = key.replace("to_q", "query_proj")
_debug_print(f"Replaced 'to_q' with 'query_proj' in {key}")
if "to_v" in key:
key = key.replace("to_v", "value_proj")
_debug_print(f"Replaced 'to_v' with 'value_proj' in {key}")
# Map the mid block
if "mid_block.resnets.0" in key:
key = key.replace("mid_block.resnets.0", "mid_blocks.0")
_debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
_debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
_debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}")
# Map the quant/post_quant layers
if "quant_conv" in key:
key = key.replace("quant_conv", "quant_proj")
value = value.squeeze()
_debug_print(f"Replaced 'quant_conv' with 'quant_proj' and squeezed value in {key}")
# Map the conv_shortcut to linear
if "conv_shortcut.weight" in key:
value = value.squeeze()
_debug_print(f"Squeezed 'conv_shortcut.weight' in {key}")
# Rearrange the dimensions to [B, H, W, C] - Autoencoder expects: B, H, W, C = x.shape
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
_debug_print(f"Transposed dimensions in {key}")
return [(key, _from_numpy(value))]
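# Illustrative shape check for the transpose above (made-up shape and _demo_conv name):
# PyTorch conv weights are laid out [out, in, kH, kW]; transposing with (0, 2, 3, 1)
# moves the input-channel axis to the end, which is the layout the Autoencoder expects.
_demo_conv = np.zeros((128, 4, 3, 3), dtype=np.float32)
assert _demo_conv.transpose(0, 2, 3, 1).shape == (128, 3, 3, 4)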
def _flatten(params):
return [(k, v) for p in params for (k, v) in p]
# The weights of the model can be loaded as 16-bit floating point numbers, which is a form of quantization known as half-precision floating point.
# This can reduce the memory requirements of the model by half compared to 32-bit floating point numbers, at the cost of reduced numerical precision.
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
dtype = np.float16 if float16 else np.float32
_debug_print(f"Loading weights from {weight_file}")
with safetensor_open(weight_file, framework="numpy") as f:
keys = list(f.keys())
weights = _flatten([mapper(k, f.get_tensor(k).astype(dtype)) for k in tqdm(keys, desc=f"Loading weights from {weight_file}...")])
model.update(tree_unflatten(weights))
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion UNet from Hugging Face Hub."""
_check_key(key, "load_unet")
# Download the config and create the model
unet_config = _MODELS[key]["unet_config"]
with open(hf_hub_download(key, unet_config)) as f:
config = json.load(f)
n_blocks = len(config["block_out_channels"])
model = UNetModel(
UNetConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=[config["layers_per_block"]] * n_blocks,
num_attention_heads=[config["attention_head_dim"]] * n_blocks
if isinstance(config["attention_head_dim"], int)
else config["attention_head_dim"],
cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
norm_num_groups=config["norm_num_groups"],
)
)
# Download the weights and map them into the model
unet_weights = _MODELS[key]["unet"]
weight_file = hf_hub_download(key, unet_weights)
_load_safetensor_weights(map_unet_weights, model, weight_file, float16)
return model
def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion text encoder from Hugging Face Hub."""
_check_key(key, "load_text_encoder")
# Download the config and create the model
text_encoder_config = _MODELS[key]["text_encoder_config"]
with open(hf_hub_download(key, text_encoder_config)) as f:
config = json.load(f)
model = CLIPTextModel(
CLIPTextModelConfig(
num_layers=config["num_hidden_layers"],
model_dims=config["hidden_size"],
num_heads=config["num_attention_heads"],
max_length=config["max_position_embeddings"],
vocab_size=config["vocab_size"],
)
)
# Download the weights and map them into the model
text_encoder_weights = _MODELS[key]["text_encoder"]
weight_file = hf_hub_download(key, text_encoder_weights)
_load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
return model
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion autoencoder from Hugging Face Hub."""
_check_key(key, "load_autoencoder")
# Download the config and create the model
vae_config = _MODELS[key]["vae_config"]
with open(hf_hub_download(key, vae_config)) as f:
config = json.load(f)
model = Autoencoder(
AutoencoderConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
latent_channels_out=2 * config["latent_channels"],
latent_channels_in=config["latent_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=config["layers_per_block"],
norm_num_groups=config["norm_num_groups"],
)
)
# Download the weights and map them into the model
vae_weights = _MODELS[key]["vae"]
weight_file = hf_hub_download(key, vae_weights)
_load_safetensor_weights(map_vae_weights, model, weight_file, float16)
return model
def load_diffusion_config(key: str = _DEFAULT_MODEL):
"""Load the stable diffusion config from Hugging Face Hub."""
_check_key(key, "load_diffusion_config")
diffusion_config = _MODELS[key]["diffusion_config"]
with open(hf_hub_download(key, diffusion_config)) as f:
config = json.load(f)
| return DiffusionConfig( | 3 | 2023-12-25 05:49:34+00:00 | 12k |
Con6924/SPM | train_spm_xl.py | [
{
"identifier": "SPMNetwork",
"path": "src/models/spm.py",
"snippet": "class SPMNetwork(nn.Module):\n UNET_TARGET_REPLACE_MODULE_TRANSFORMER = [\n \"Transformer2DModel\",\n ]\n UNET_TARGET_REPLACE_MODULE_CONV = [\n \"ResnetBlock2D\",\n \"Downsample2D\",\n \"Upsample2D\",\n ]\n\n SPM_PREFIX_UNET = \"lora_unet\" # aligning with SD webui usage\n DEFAULT_TARGET_REPLACE = UNET_TARGET_REPLACE_MODULE_TRANSFORMER\n\n def __init__(\n self,\n unet: UNet2DConditionModel,\n rank: int = 4,\n multiplier: float = 1.0,\n alpha: float = 1.0,\n module = SPMLayer,\n module_kwargs = None,\n ) -> None:\n super().__init__()\n\n self.multiplier = multiplier\n self.dim = rank\n self.alpha = alpha\n\n self.module = module\n self.module_kwargs = module_kwargs or {}\n\n # unet spm\n self.unet_spm_layers = self.create_modules(\n SPMNetwork.SPM_PREFIX_UNET,\n unet,\n SPMNetwork.DEFAULT_TARGET_REPLACE,\n self.dim,\n self.multiplier,\n )\n print(f\"Create SPM for U-Net: {len(self.unet_spm_layers)} modules.\")\n\n spm_names = set()\n for spm_layer in self.unet_spm_layers:\n assert (\n spm_layer.spm_name not in spm_names\n ), f\"duplicated SPM layer name: {spm_layer.spm_name}. {spm_names}\"\n spm_names.add(spm_layer.spm_name)\n\n for spm_layer in self.unet_spm_layers:\n spm_layer.apply_to()\n self.add_module(\n spm_layer.spm_name,\n spm_layer,\n )\n\n del unet\n\n torch.cuda.empty_cache()\n\n def create_modules(\n self,\n prefix: str,\n root_module: nn.Module,\n target_replace_modules: List[str],\n rank: int,\n multiplier: float,\n ) -> list:\n spm_layers = []\n\n for name, module in root_module.named_modules():\n if module.__class__.__name__ in target_replace_modules:\n for child_name, child_module in module.named_modules():\n if child_module.__class__.__name__ in [\"Linear\", \"Conv2d\"]:\n spm_name = prefix + \".\" + name + \".\" + child_name\n spm_name = spm_name.replace(\".\", \"_\")\n print(f\"{spm_name}\")\n spm_layer = self.module(\n spm_name, child_module, multiplier, rank, self.alpha, **self.module_kwargs\n )\n spm_layers.append(spm_layer)\n\n return spm_layers\n\n def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):\n all_params = []\n\n if self.unet_spm_layers:\n params = []\n [params.extend(spm_layer.parameters()) for spm_layer in self.unet_spm_layers]\n param_data = {\"params\": params}\n if default_lr is not None:\n param_data[\"lr\"] = default_lr\n all_params.append(param_data)\n\n return all_params\n\n def save_weights(self, file, dtype=None, metadata: Optional[dict] = None):\n state_dict = self.state_dict()\n\n if dtype is not None:\n for key in list(state_dict.keys()):\n v = state_dict[key]\n v = v.detach().clone().to(\"cpu\").to(dtype)\n state_dict[key] = v\n\n for key in list(state_dict.keys()):\n if not key.startswith(\"lora\"):\n del state_dict[key]\n\n if os.path.splitext(file)[1] == \".safetensors\":\n save_file(state_dict, file, metadata)\n else:\n torch.save(state_dict, file)\n\n def __enter__(self):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 1.0\n\n def __exit__(self, exc_type, exc_value, tb):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 0"
},
{
"identifier": "SPMLayer",
"path": "src/models/spm.py",
"snippet": "class SPMLayer(nn.Module):\n \"\"\"\n replaces forward method of the original Linear, instead of replacing the original Linear module.\n \"\"\"\n\n def __init__(\n self,\n spm_name,\n org_module: nn.Module,\n multiplier=1.0,\n dim=4,\n alpha=1,\n ):\n \"\"\"if alpha == 0 or None, alpha is rank (no scaling).\"\"\"\n super().__init__()\n self.spm_name = spm_name\n self.dim = dim\n\n if org_module.__class__.__name__ == \"Linear\":\n in_dim = org_module.in_features\n out_dim = org_module.out_features\n self.lora_down = nn.Linear(in_dim, dim, bias=False)\n self.lora_up = nn.Linear(dim, out_dim, bias=False)\n\n elif org_module.__class__.__name__ == \"Conv2d\":\n in_dim = org_module.in_channels\n out_dim = org_module.out_channels\n\n self.dim = min(self.dim, in_dim, out_dim)\n if self.dim != dim:\n print(f\"{spm_name} dim (rank) is changed to: {self.dim}\")\n\n kernel_size = org_module.kernel_size\n stride = org_module.stride\n padding = org_module.padding\n self.lora_down = nn.Conv2d(\n in_dim, self.dim, kernel_size, stride, padding, bias=False\n )\n self.lora_up = nn.Conv2d(self.dim, out_dim, (1, 1), (1, 1), bias=False)\n\n if type(alpha) == torch.Tensor:\n alpha = alpha.detach().numpy()\n alpha = dim if alpha is None or alpha == 0 else alpha\n self.scale = alpha / self.dim\n self.register_buffer(\"alpha\", torch.tensor(alpha))\n\n # same as microsoft's\n nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_up.weight)\n\n self.multiplier = multiplier\n self.org_module = org_module # remove in applying\n\n def apply_to(self):\n self.org_forward = self.org_module.forward\n self.org_module.forward = self.forward\n del self.org_module\n\n def forward(self, x):\n return (\n self.org_forward(x)\n + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale\n )"
},
{
"identifier": "sample_xl",
"path": "src/engine/sampling.py",
"snippet": "def sample_xl(prompt_pair: PromptEmbedsPair, tokenizers=None, text_encoders=None):\n res = []\n for unconditional, target in zip(\n [prompt_pair.unconditional.text_embeds, prompt_pair.unconditional.pooled_embeds],\n [prompt_pair.target.text_embeds, prompt_pair.target.pooled_embeds]\n ):\n samples = []\n while len(samples) < prompt_pair.sampling_batch_size:\n while True:\n # sample from gaussian distribution\n noise = torch.randn_like(target)\n # normalize the noise\n noise = noise / noise.view(-1).norm(dim=-1)\n # compute the similarity\n sim = torch.cosine_similarity(target.view(-1), noise.view(-1), dim=-1)\n # the possibility of accepting the sample = 1 - sim\n if random.random() < 1 - sim:\n break\n scale = random.random() * 0.4 + 0.8\n sample = scale * noise * target.view(-1).norm(dim=-1)\n samples.append(sample)\n \n samples = [torch.cat([unconditional, s]) for s in samples]\n samples = torch.cat(samples, dim=0)\n res.append(samples)\n \n return res"
},
{
"identifier": "model_util",
"path": "src/models/model_util.py",
"snippet": "TOKENIZER_V1_MODEL_NAME = \"CompVis/stable-diffusion-v1-4\"\nTOKENIZER_V2_MODEL_NAME = \"stabilityai/stable-diffusion-2-1\"\nAVAILABLE_SCHEDULERS = Literal[\"ddim\", \"ddpm\", \"lms\", \"euler_a\"]\nSDXL_TEXT_ENCODER_TYPE = Union[CLIPTextModel, CLIPTextModelWithProjection]\nDIFFUSERS_CACHE_DIR = \".cache/\" # if you want to change the cache dir, change this\nLOCAL_ONLY = False # if you want to use only local files, change this\ndef load_diffusers_model(\n pretrained_model_name_or_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel,]:\ndef load_checkpoint_model(\n checkpoint_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, DiffusionPipeline]:\ndef load_models(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n v2: bool = False,\n v_pred: bool = False,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, SchedulerMixin, DiffusionPipeline, ]:\ndef load_diffusers_model_xl(\n pretrained_model_name_or_path: str,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel,]:\ndef load_checkpoint_model_xl(\n checkpoint_path: str,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel, DiffusionPipeline, ]:\ndef load_models_xl(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[\ndef create_noise_scheduler(\n scheduler_name: AVAILABLE_SCHEDULERS = \"ddpm\",\n prediction_type: Literal[\"epsilon\", \"v_prediction\"] = \"epsilon\",\n) -> SchedulerMixin:"
},
{
"identifier": "eval_util",
"path": "src/evaluation/eval_util.py",
"snippet": "def get_clip_preprocess(n_px=224):\n def Convert(image):\n def text_preprocess(text):\ndef clip_score(\n images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],\n texts: str,\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n cross_matching: bool = False,\n):\ndef clip_accuracy(\n images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],\n ablated_texts: Union[List[str], str],\n anchor_texts: Union[List[str], str],\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n):\ndef clip_eval_by_image(\n images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],\n ablated_texts: Union[List[str], str],\n anchor_texts: Union[List[str], str],\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n):\ndef clip_eval(\n pipe: DiffusionPipeline,\n config: RootConfig,\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n):"
},
{
"identifier": "config",
"path": "src/configs/config.py",
"snippet": "PRECISION_TYPES = Literal[\"fp32\", \"fp16\", \"bf16\", \"float32\", \"float16\", \"bfloat16\"]\nclass PretrainedModelConfig(BaseModel):\nclass NetworkConfig(BaseModel):\nclass TrainConfig(BaseModel): \nclass SaveConfig(BaseModel):\nclass LoggingConfig(BaseModel):\nclass InferenceConfig(BaseModel):\nclass OtherConfig(BaseModel):\nclass RootConfig(BaseModel):\ndef parse_precision(precision: str) -> torch.dtype:\ndef load_config_from_yaml(config_path: str) -> RootConfig:"
},
{
"identifier": "prompt",
"path": "src/configs/prompt.py",
"snippet": "ACTION_TYPES = Literal[\n \"erase\",\n \"erase_with_la\",\n]\nPROMPT_EMBEDDING = Union[torch.FloatTensor, PromptEmbedsXL]\nclass PromptEmbedsXL:\nclass PromptEmbedsCache:\nclass PromptSettings(BaseModel): # yaml\nclass PromptEmbedsPair:\n def __init__(self, embeds) -> None:\n def __setitem__(self, __name: str, __value: PROMPT_EMBEDDING) -> None:\n def __getitem__(self, __name: str) -> Optional[PROMPT_EMBEDDING]:\n def fill_prompts(cls, values):\n def __init__(\n self,\n loss_fn: torch.nn.Module,\n target: PROMPT_EMBEDDING,\n positive: PROMPT_EMBEDDING,\n unconditional: PROMPT_EMBEDDING,\n neutral: PROMPT_EMBEDDING,\n settings: PromptSettings,\n ) -> None:\n def _prepare_embeddings(\n self, \n cache: PromptEmbedsCache,\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTextModel,\n ):\n def _erase(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n **kwargs,\n ) -> torch.FloatTensor:\n def _erase_with_la(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n anchor_latents: torch.FloatTensor, \n anchor_latents_ori: torch.FloatTensor, \n **kwargs,\n ):\n def loss(\n self,\n **kwargs,\n ):\ndef load_prompts_from_yaml(path: str | Path) -> list[PromptSettings]:\ndef load_prompts_from_table(path: str | Path) -> list[PromptSettings]:\ndef compute_rotation_matrix(target: torch.FloatTensor):"
},
{
"identifier": "RootConfig",
"path": "src/configs/config.py",
"snippet": "class RootConfig(BaseModel):\n prompts_file: Optional[str] = None\n \n pretrained_model: PretrainedModelConfig\n\n network: Optional[NetworkConfig] = None\n\n train: Optional[TrainConfig] = None\n\n save: Optional[SaveConfig] = None\n\n logging: Optional[LoggingConfig] = None\n\n inference: Optional[InferenceConfig] = None\n\n other: Optional[OtherConfig] = None"
},
{
"identifier": "PromptEmbedsCache",
"path": "src/configs/prompt.py",
"snippet": "class PromptEmbedsCache:\n prompts: dict[str, PROMPT_EMBEDDING] = {}\n\n def __setitem__(self, __name: str, __value: PROMPT_EMBEDDING) -> None:\n self.prompts[__name] = __value\n\n def __getitem__(self, __name: str) -> Optional[PROMPT_EMBEDDING]:\n if __name in self.prompts:\n return self.prompts[__name]\n else:\n return None"
},
{
"identifier": "PromptEmbedsPair",
"path": "src/configs/prompt.py",
"snippet": "class PromptEmbedsPair:\n target: PROMPT_EMBEDDING # the concept that do not want to generate \n positive: PROMPT_EMBEDDING # generate the concept\n unconditional: PROMPT_EMBEDDING # uncondition (default should be empty)\n neutral: PROMPT_EMBEDDING # base condition (default should be empty)\n use_template: bool = False # use clip template or not\n\n guidance_scale: float\n resolution: int\n dynamic_resolution: bool\n batch_size: int\n dynamic_crops: bool\n\n loss_fn: torch.nn.Module\n action: ACTION_TYPES\n\n def __init__(\n self,\n loss_fn: torch.nn.Module,\n target: PROMPT_EMBEDDING,\n positive: PROMPT_EMBEDDING,\n unconditional: PROMPT_EMBEDDING,\n neutral: PROMPT_EMBEDDING,\n settings: PromptSettings,\n ) -> None:\n self.loss_fn = loss_fn\n self.target = target\n self.positive = positive\n self.unconditional = unconditional\n self.neutral = neutral\n \n self.settings = settings\n\n self.use_template = settings.use_template\n self.guidance_scale = settings.guidance_scale\n self.resolution = settings.resolution\n self.dynamic_resolution = settings.dynamic_resolution\n self.batch_size = settings.batch_size\n self.dynamic_crops = settings.dynamic_crops\n self.action = settings.action\n \n self.la_strength = settings.la_strength\n self.sampling_batch_size = settings.sampling_batch_size\n \n \n def _prepare_embeddings(\n self, \n cache: PromptEmbedsCache,\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTextModel,\n ):\n \"\"\"\n Prepare embeddings for training. When use_template is True, the embeddings will be\n format using a template, and then be processed by the model.\n \"\"\"\n if not self.use_template:\n return\n template = random.choice(imagenet_templates)\n target_prompt = template.format(self.settings.target)\n if cache[target_prompt]:\n self.target = cache[target_prompt]\n else:\n self.target = encode_prompts(tokenizer, text_encoder, [target_prompt])\n \n \n def _erase(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n **kwargs,\n ) -> torch.FloatTensor:\n \"\"\"Target latents are going not to have the positive concept.\"\"\"\n\n erase_loss = self.loss_fn(\n target_latents,\n neutral_latents\n - self.guidance_scale * (positive_latents - neutral_latents),\n )\n losses = {\n \"loss\": erase_loss,\n \"loss/erase\": erase_loss,\n }\n return losses\n \n def _erase_with_la(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n anchor_latents: torch.FloatTensor, \n anchor_latents_ori: torch.FloatTensor, \n **kwargs,\n ):\n anchoring_loss = self.loss_fn(anchor_latents, anchor_latents_ori)\n erase_loss = self._erase(\n target_latents=target_latents,\n positive_latents=positive_latents,\n neutral_latents=neutral_latents,\n )[\"loss/erase\"]\n losses = {\n \"loss\": erase_loss + self.la_strength * anchoring_loss,\n \"loss/erase\": erase_loss,\n \"loss/anchoring\": anchoring_loss\n }\n return losses\n\n def loss(\n self,\n **kwargs,\n ):\n if self.action == \"erase\":\n return self._erase(**kwargs)\n elif self.action == \"erase_with_la\":\n return self._erase_with_la(**kwargs)\n else:\n raise ValueError(\"action must be erase or erase_with_la\")"
},
{
"identifier": "PromptSettings",
"path": "src/configs/prompt.py",
"snippet": "class PromptSettings(BaseModel): # yaml\n target: str\n positive: str = None # if None, target will be used\n unconditional: str = \"\" # default is \"\"\n neutral: str = None # if None, unconditional will be used\n action: ACTION_TYPES = \"erase\" # default is \"erase\"\n guidance_scale: float = 1.0 # default is 1.0\n resolution: int = 512 # default is 512\n dynamic_resolution: bool = False # default is False\n batch_size: int = 1 # default is 1\n dynamic_crops: bool = False # default is False. only used when model is XL\n use_template: bool = False # default is False\n \n la_strength: float = 1000.0\n sampling_batch_size: int = 4\n\n seed: int = None\n case_number: int = 0\n\n @root_validator(pre=True)\n def fill_prompts(cls, values):\n keys = values.keys()\n if \"target\" not in keys:\n raise ValueError(\"target must be specified\")\n if \"positive\" not in keys:\n values[\"positive\"] = values[\"target\"]\n if \"unconditional\" not in keys:\n values[\"unconditional\"] = \"\"\n if \"neutral\" not in keys:\n values[\"neutral\"] = values[\"unconditional\"]\n\n return values"
},
{
"identifier": "PromptEmbedsXL",
"path": "src/configs/prompt.py",
"snippet": "class PromptEmbedsXL:\n text_embeds: torch.FloatTensor\n pooled_embeds: torch.FloatTensor\n\n def __init__(self, embeds) -> None:\n self.text_embeds, self.pooled_embeds = embeds"
}
] | import argparse
import gc
import torch
import src.engine.train_util as train_util
import wandb
from pathlib import Path
from tqdm import tqdm
from src.models.spm import (
SPMNetwork,
SPMLayer,
)
from src.engine.sampling import sample_xl
from src.models import model_util
from src.evaluation import eval_util
from src.configs import config as config_pkg
from src.configs import prompt as prompt_pkg
from src.configs.config import RootConfig
from src.configs.prompt import PromptEmbedsCache, PromptEmbedsPair, PromptSettings, PromptEmbedsXL | 7,349 | text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.target.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.target.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
# ------------------------- latent anchoring part -----------------------------
if prompt_pair.action == "erase_with_la":
# noise sampling
anchors_text, anchors_pool = sample_xl(prompt_pair, tokenizers=tokenizers, text_encoders=text_encoders)
# get latents
repeat = prompt_pair.sampling_batch_size // prompt_pair.batch_size
# TODO: target or positive?
with network:
anchor_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents.repeat(repeat, 1, 1, 1),
text_embeddings=anchors_text,
add_text_embeddings=anchors_pool,
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
with torch.no_grad():
anchor_latents_ori = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents.repeat(repeat, 1, 1, 1),
text_embeddings=anchors_text,
add_text_embeddings=anchors_pool,
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
anchor_latents_ori.requires_grad = False
else:
anchor_latents = None
anchor_latents_ori = None
# ----------------------------------------------------------------
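# Only target_latents (and anchor_latents) carry gradients; the positive/neutral predictions act as fixed regression targets for the erase loss.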
positive_latents.requires_grad = False
neutral_latents.requires_grad = False
loss = prompt_pair.loss(
target_latents=target_latents,
positive_latents=positive_latents,
neutral_latents=neutral_latents,
anchor_latents=anchor_latents,
anchor_latents_ori=anchor_latents_ori,
)
loss["loss"].backward()
if config.train.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(
trainable_params, config.train.max_grad_norm, norm_type=2
)
optimizer.step()
lr_scheduler.step()
pbar.set_description(f"Loss*1k: {loss['loss'].item()*1000:.4f}")
# logging
if config.logging.use_wandb:
log_dict = {"iteration": i}
loss = {k: v.detach().cpu().item() for k, v in loss.items()}
log_dict.update(loss)
lrs = lr_scheduler.get_last_lr()
if len(lrs) == 1:
log_dict["lr"] = float(lrs[0])
else:
log_dict["lr/textencoder"] = float(lrs[0])
log_dict["lr/unet"] = float(lrs[-1])
if config.train.optimizer_type.lower().startswith("dadapt"):
log_dict["lr/d*lr"] = (
optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"]
)
# generate sample images
if config.logging.interval > 0 and (
i % config.logging.interval == 0 or i == config.train.iterations - 1
):
print("Generating samples...")
with network:
samples = train_util.text2img(
pipe,
prompts=config.logging.prompts,
negative_prompt=config.logging.negative_prompt,
width=config.logging.width,
height=config.logging.height,
num_inference_steps=config.logging.num_inference_steps,
guidance_scale=config.logging.guidance_scale,
generate_num=config.logging.generate_num,
seed=config.logging.seed,
)
for text, img in samples:
log_dict[text] = wandb.Image(img)
# evaluate on the generated images
print("Evaluating CLIPScore and CLIPAccuracy...")
with network:
| # ref:
# - https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L566
# - https://huggingface.co/spaces/baulab/Erasing-Concepts-In-Diffusion/blob/main/train.py
# - https://github.com/p1atdev/LECO/blob/main/train_lora_xl.py
DEVICE_CUDA = torch.device("cuda:0")
NUM_IMAGES_PER_PROMPT = 1
def flush():
torch.cuda.empty_cache()
gc.collect()
def train(
config: RootConfig,
prompts: list[PromptSettings],
):
metadata = {
"prompts": ",".join([prompt.json() for prompt in prompts]),
"config": config.json(),
}
model_metadata = {
"prompts": ",".join([prompt.target for prompt in prompts]),
"rank": str(config.network.rank),
"alpha": str(config.network.alpha),
}
save_path = Path(config.save.path)
if config.logging.verbose:
print(metadata)
weight_dtype = config_pkg.parse_precision(config.train.precision)
save_weight_dtype = config_pkg.parse_precision(config.train.precision)
if config.logging.use_wandb:
wandb.init(project=f"SPM",
config=metadata,
name=config.logging.run_name,
settings=wandb.Settings(symlink=False))
(
tokenizers,
text_encoders,
unet,
noise_scheduler,
pipe
) = model_util.load_models_xl(
config.pretrained_model.name_or_path,
scheduler_name=config.train.noise_scheduler,
)
for text_encoder in text_encoders:
text_encoder.to(DEVICE_CUDA, dtype=weight_dtype)
text_encoder.requires_grad_(False)
text_encoder.eval()
unet.to(DEVICE_CUDA, dtype=weight_dtype)
unet.enable_xformers_memory_efficient_attention()
unet.requires_grad_(False)
unet.eval()
network = SPMNetwork(
unet,
rank=config.network.rank,
multiplier=1.0,
alpha=config.network.alpha,
module=SPMLayer,
).to(DEVICE_CUDA, dtype=weight_dtype)
trainable_params = network.prepare_optimizer_params(
config.train.text_encoder_lr, config.train.unet_lr, config.train.lr
)
optimizer_name, optimizer_args, optimizer = train_util.get_optimizer(
config, trainable_params
)
lr_scheduler = train_util.get_scheduler_fix(config, optimizer)
criteria = torch.nn.MSELoss()
print("Prompts")
for settings in prompts:
print(settings)
cache = PromptEmbedsCache()
prompt_pairs: list[PromptEmbedsPair] = []
with torch.no_grad():
for settings in prompts:
for prompt in [
settings.target,
settings.positive,
settings.neutral,
settings.unconditional,
]:
if cache[prompt] is None:
cache[prompt] = PromptEmbedsXL(
train_util.encode_prompts_xl(
tokenizers,
text_encoders,
[prompt],
num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
)
)
prompt_pair = PromptEmbedsPair(
criteria,
cache[settings.target],
cache[settings.positive],
cache[settings.unconditional],
cache[settings.neutral],
settings,
)
assert prompt_pair.sampling_batch_size % prompt_pair.batch_size == 0
prompt_pairs.append(prompt_pair)
flush()
pbar = tqdm(range(config.train.iterations))
loss = None
for i in pbar:
with torch.no_grad():
noise_scheduler.set_timesteps(
config.train.max_denoising_steps, device=DEVICE_CUDA
)
optimizer.zero_grad()
prompt_pair: PromptEmbedsPair = prompt_pairs[
torch.randint(0, len(prompt_pairs), (1,)).item()
]
timesteps_to = torch.randint(
1, config.train.max_denoising_steps, (1,)
).item()
height, width = (
prompt_pair.resolution,
prompt_pair.resolution,
)
if prompt_pair.dynamic_resolution:
height, width = train_util.get_random_resolution_in_bucket(
prompt_pair.resolution
)
if config.logging.verbose:
print("guidance_scale:", prompt_pair.guidance_scale)
print("resolution:", prompt_pair.resolution)
print("dynamic_resolution:", prompt_pair.dynamic_resolution)
if prompt_pair.dynamic_resolution:
print("bucketed resolution:", (height, width))
print("batch_size:", prompt_pair.batch_size)
print("dynamic_crops:", prompt_pair.dynamic_crops)
latents = train_util.get_initial_latents(
noise_scheduler, prompt_pair.batch_size, height, width, 1
).to(DEVICE_CUDA, dtype=weight_dtype)
add_time_ids = train_util.get_add_time_ids(
height,
width,
dynamic_crops=prompt_pair.dynamic_crops,
dtype=weight_dtype,
).to(DEVICE_CUDA, dtype=weight_dtype)
with network:
denoised_latents = train_util.diffusion_xl(
unet,
noise_scheduler,
latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.target.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.target.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
start_timesteps=0,
total_timesteps=timesteps_to,
guidance_scale=3,
)
noise_scheduler.set_timesteps(1000)
current_timestep = noise_scheduler.timesteps[
int(timesteps_to * 1000 / config.train.max_denoising_steps)
]
positive_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.positive.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.positive.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
neutral_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.neutral.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.neutral.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
with network:
target_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.target.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.target.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
# ------------------------- latent anchoring part -----------------------------
if prompt_pair.action == "erase_with_la":
# noise sampling
anchors_text, anchors_pool = sample_xl(prompt_pair, tokenizers=tokenizers, text_encoders=text_encoders)
# get latents
repeat = prompt_pair.sampling_batch_size // prompt_pair.batch_size
# TODO: target or positive?
with network:
anchor_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents.repeat(repeat, 1, 1, 1),
text_embeddings=anchors_text,
add_text_embeddings=anchors_pool,
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
with torch.no_grad():
anchor_latents_ori = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents.repeat(repeat, 1, 1, 1),
text_embeddings=anchors_text,
add_text_embeddings=anchors_pool,
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
anchor_latents_ori.requires_grad = False
else:
anchor_latents = None
anchor_latents_ori = None
# ----------------------------------------------------------------
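# Only target_latents (and anchor_latents) carry gradients; the positive/neutral predictions act as fixed regression targets for the erase loss.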
positive_latents.requires_grad = False
neutral_latents.requires_grad = False
loss = prompt_pair.loss(
target_latents=target_latents,
positive_latents=positive_latents,
neutral_latents=neutral_latents,
anchor_latents=anchor_latents,
anchor_latents_ori=anchor_latents_ori,
)
loss["loss"].backward()
if config.train.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(
trainable_params, config.train.max_grad_norm, norm_type=2
)
optimizer.step()
lr_scheduler.step()
pbar.set_description(f"Loss*1k: {loss['loss'].item()*1000:.4f}")
# logging
if config.logging.use_wandb:
log_dict = {"iteration": i}
loss = {k: v.detach().cpu().item() for k, v in loss.items()}
log_dict.update(loss)
lrs = lr_scheduler.get_last_lr()
if len(lrs) == 1:
log_dict["lr"] = float(lrs[0])
else:
log_dict["lr/textencoder"] = float(lrs[0])
log_dict["lr/unet"] = float(lrs[-1])
if config.train.optimizer_type.lower().startswith("dadapt"):
log_dict["lr/d*lr"] = (
optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"]
)
# generate sample images
if config.logging.interval > 0 and (
i % config.logging.interval == 0 or i == config.train.iterations - 1
):
print("Generating samples...")
with network:
samples = train_util.text2img(
pipe,
prompts=config.logging.prompts,
negative_prompt=config.logging.negative_prompt,
width=config.logging.width,
height=config.logging.height,
num_inference_steps=config.logging.num_inference_steps,
guidance_scale=config.logging.guidance_scale,
generate_num=config.logging.generate_num,
seed=config.logging.seed,
)
for text, img in samples:
log_dict[text] = wandb.Image(img)
# evaluate on the generated images
print("Evaluating CLIPScore and CLIPAccuracy...")
with network: | clip_scores, clip_accs = eval_util.clip_eval(pipe, config) | 4 | 2023-12-26 03:19:16+00:00 | 12k |
dakpinaroglu/Frame2seq | frame2seq/model/Frame2seq.py | [
{
"identifier": "Rigid",
"path": "frame2seq/utils/rigid_utils.py",
"snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = 
torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())"
},
{
"identifier": "LayerNorm",
"path": "frame2seq/openfold/model/primitives.py",
"snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n d = x.dtype\n # deepspeed_is_initialized = (\n # deepspeed_is_installed and \n # deepspeed.utils.is_initialized()\n # )\n # if(d is torch.bfloat16 and not deepspeed_is_initialized):\n # with torch.cuda.amp.autocast(enabled=False):\n # out = nn.functional.layer_norm(\n # x, \n # self.c_in, \n # self.weight.to(dtype=d), \n # self.bias.to(dtype=d), \n # self.eps\n # )\n # else:\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out"
},
{
"identifier": "InvariantPointAttention",
"path": "frame2seq/openfold/model/structure_module.py",
"snippet": "class InvariantPointAttention(nn.Module):\n \"\"\"\n Implements Algorithm 22.\n \"\"\"\n def __init__(\n self,\n c_s: int,\n c_z: int,\n c_hidden: int,\n no_heads: int,\n no_qk_points: int,\n no_v_points: int,\n inf: float = 1e5,\n eps: float = 1e-8,\n ):\n \"\"\"\n Args:\n c_s:\n Single representation channel dimension\n c_z:\n Pair representation channel dimension\n c_hidden:\n Hidden channel dimension\n no_heads:\n Number of attention heads\n no_qk_points:\n Number of query/key points to generate\n no_v_points:\n Number of value points to generate\n \"\"\"\n super(InvariantPointAttention, self).__init__()\n\n self.c_s = c_s\n self.c_z = c_z\n self.c_hidden = c_hidden\n self.no_heads = no_heads\n self.no_qk_points = no_qk_points\n self.no_v_points = no_v_points\n self.inf = inf\n self.eps = eps\n\n # These linear layers differ from their specifications in the\n # supplement. There, they lack bias and use Glorot initialization.\n # Here as in the official source, they have bias and use the default\n # Lecun initialization.\n hc = self.c_hidden * self.no_heads\n self.linear_q = Linear(self.c_s, hc)\n self.linear_kv = Linear(self.c_s, 2 * hc)\n\n hpq = self.no_heads * self.no_qk_points * 3\n self.linear_q_points = Linear(self.c_s, hpq)\n\n hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3\n self.linear_kv_points = Linear(self.c_s, hpkv)\n\n hpv = self.no_heads * self.no_v_points * 3\n\n self.linear_b = Linear(self.c_z, self.no_heads)\n\n self.head_weights = nn.Parameter(torch.zeros((no_heads)))\n ipa_point_weights_init_(self.head_weights)\n\n concat_out_dim = self.no_heads * (\n self.c_z + self.c_hidden + self.no_v_points * 4\n )\n self.linear_out = Linear(concat_out_dim, self.c_s, init=\"final\")\n\n self.softmax = nn.Softmax(dim=-1)\n self.softplus = nn.Softplus()\n\n def forward(\n self,\n s: torch.Tensor,\n z: Optional[torch.Tensor],\n r: Rigid,\n mask: torch.Tensor,\n inplace_safe: bool = False,\n _offload_inference: bool = False,\n _z_reference_list: Optional[Sequence[torch.Tensor]] = None,\n attn_drop_rate = 0.0,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n s:\n [*, N_res, C_s] single representation\n z:\n [*, N_res, N_res, C_z] pair representation\n r:\n [*, N_res] transformation object\n mask:\n [*, N_res] mask\n Returns:\n [*, N_res, C_s] single representation update\n \"\"\"\n if(_offload_inference and inplace_safe):\n z = _z_reference_list\n else:\n z = [z]\n \n #######################################\n # Generate scalar and point activations\n #######################################\n # [*, N_res, H * C_hidden]\n q = self.linear_q(s)\n kv = self.linear_kv(s)\n\n # [*, N_res, H, C_hidden]\n q = q.view(q.shape[:-1] + (self.no_heads, -1))\n\n # [*, N_res, H, 2 * C_hidden]\n kv = kv.view(kv.shape[:-1] + (self.no_heads, -1))\n\n # [*, N_res, H, C_hidden]\n k, v = torch.split(kv, self.c_hidden, dim=-1)\n\n # [*, N_res, H * P_q * 3]\n q_pts = self.linear_q_points(s)\n\n # This is kind of clunky, but it's how the original does it\n # [*, N_res, H * P_q, 3]\n q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1)\n q_pts = torch.stack(q_pts, dim=-1)\n q_pts = r[..., None].apply(q_pts)\n\n # [*, N_res, H, P_q, 3]\n q_pts = q_pts.view(\n q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3)\n )\n\n # [*, N_res, H * (P_q + P_v) * 3]\n kv_pts = self.linear_kv_points(s)\n\n # [*, N_res, H * (P_q + P_v), 3]\n kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1)\n kv_pts = torch.stack(kv_pts, dim=-1)\n kv_pts = r[..., None].apply(kv_pts)\n\n # [*, N_res, H, 
(P_q + P_v), 3]\n kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3))\n\n # [*, N_res, H, P_q/P_v, 3]\n k_pts, v_pts = torch.split(\n kv_pts, [self.no_qk_points, self.no_v_points], dim=-2\n )\n\n ##########################\n # Compute attention scores\n ##########################\n # [*, N_res, N_res, H]\n b = self.linear_b(z[0])\n \n if(_offload_inference):\n assert(sys.getrefcount(z[0]) == 2)\n z[0] = z[0].cpu()\n\n # [*, H, N_res, N_res]\n if(is_fp16_enabled()):\n with torch.cuda.amp.autocast(enabled=False):\n a = torch.matmul(\n permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden]\n permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res]\n )\n else:\n a = torch.matmul(\n permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden]\n permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res]\n )\n \n a *= math.sqrt(1.0 / (3 * self.c_hidden))\n a += (math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1)))\n\n # [*, N_res, N_res, H, P_q, 3]\n pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5)\n if(inplace_safe):\n pt_att *= pt_att\n else:\n pt_att = pt_att ** 2\n\n # [*, N_res, N_res, H, P_q]\n pt_att = sum(torch.unbind(pt_att, dim=-1))\n head_weights = self.softplus(self.head_weights).view(\n *((1,) * len(pt_att.shape[:-2]) + (-1, 1))\n )\n head_weights = head_weights * math.sqrt(\n 1.0 / (3 * (self.no_qk_points * 9.0 / 2))\n )\n if(inplace_safe):\n pt_att *= head_weights\n else:\n pt_att = pt_att * head_weights\n\n # [*, N_res, N_res, H]\n pt_att = torch.sum(pt_att, dim=-1) * (-0.5)\n # [*, N_res, N_res]\n square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)\n square_mask = self.inf * (square_mask - 1)\n\n \"\"\"\n Frame2seq implementation of IPA regularization via attention dropout\n \"\"\"\n if attn_drop_rate > 0.0:\n random_square_mask = torch.rand(square_mask.shape, device=square_mask.device) \n random_square_mask = self.inf * -1 * (random_square_mask < attn_drop_rate) \n square_mask += random_square_mask\n\n # [*, H, N_res, N_res]\n pt_att = permute_final_dims(pt_att, (2, 0, 1))\n \n if(inplace_safe):\n a += pt_att\n del pt_att\n a += square_mask.unsqueeze(-3)\n # in-place softmax\n attn_core_inplace_cuda.forward_(\n a,\n reduce(mul, a.shape[:-1]),\n a.shape[-1],\n )\n else:\n a = a + pt_att \n a = a + square_mask.unsqueeze(-3)\n a = self.softmax(a)\n\n ################\n # Compute output\n ################\n # [*, N_res, H, C_hidden]\n o = torch.matmul(\n a, v.transpose(-2, -3).to(dtype=a.dtype)\n ).transpose(-2, -3)\n\n # [*, N_res, H * C_hidden]\n o = flatten_final_dims(o, 2)\n\n # [*, H, 3, N_res, P_v] \n if(inplace_safe):\n v_pts = permute_final_dims(v_pts, (1, 3, 0, 2))\n o_pt = [\n torch.matmul(a, v.to(a.dtype)) \n for v in torch.unbind(v_pts, dim=-3)\n ]\n o_pt = torch.stack(o_pt, dim=-3)\n else:\n o_pt = torch.sum(\n (\n a[..., None, :, :, None]\n * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :]\n ),\n dim=-2,\n )\n\n # [*, N_res, H, P_v, 3]\n o_pt = permute_final_dims(o_pt, (2, 0, 3, 1))\n o_pt = r[..., None, None].invert_apply(o_pt)\n\n # [*, N_res, H * P_v]\n o_pt_norm = flatten_final_dims(\n torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.eps), 2\n )\n\n # [*, N_res, H * P_v, 3]\n o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3)\n\n if(_offload_inference):\n z[0] = z[0].to(o_pt.device)\n\n # [*, N_res, H, C_z]\n o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype))\n\n # [*, N_res, H * C_z]\n o_pair = flatten_final_dims(o_pair, 2)\n\n # [*, N_res, C_s]\n s = self.linear_out(\n torch.cat(\n (o, 
*torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1\n ).to(dtype=z[0].dtype)\n )\n \n return s"
},
{
"identifier": "StructureModuleTransition",
"path": "frame2seq/openfold/model/structure_module.py",
"snippet": "class StructureModuleTransition(nn.Module):\n def __init__(self, c, num_layers, dropout_rate):\n super(StructureModuleTransition, self).__init__()\n\n self.c = c\n self.num_layers = num_layers\n self.dropout_rate = dropout_rate\n\n self.layers = nn.ModuleList()\n for _ in range(self.num_layers):\n l = StructureModuleTransitionLayer(self.c)\n self.layers.append(l)\n\n self.dropout = nn.Dropout(self.dropout_rate)\n self.layer_norm = LayerNorm(self.c)\n\n def forward(self, s):\n for l in self.layers:\n s = l(s)\n\n s = self.dropout(s)\n s = self.layer_norm(s)\n\n return s"
},
{
"identifier": "EdgeTransition",
"path": "frame2seq/model/edge_update.py",
"snippet": "class EdgeTransition(nn.Module):\n \"\"\"\n Edge update operation.\n \"\"\"\n\n def __init__(self,\n node_embed_size,\n edge_embed_in,\n edge_embed_out,\n num_layers=2,\n node_dilation=2):\n super(EdgeTransition, self).__init__()\n\n bias_embed_size = node_embed_size // node_dilation\n self.initial_embed = Linear(node_embed_size,\n bias_embed_size,\n init=\"relu\")\n hidden_size = bias_embed_size * 2 + edge_embed_in\n trunk_layers = []\n for _ in range(num_layers):\n trunk_layers.append(Linear(hidden_size, hidden_size, init=\"relu\"))\n trunk_layers.append(nn.ReLU())\n self.trunk = nn.Sequential(*trunk_layers)\n self.final_layer = Linear(hidden_size, edge_embed_out, init=\"final\")\n self.layer_norm = nn.LayerNorm(edge_embed_out)\n\n def forward(self, node_embed, edge_embed):\n node_embed = self.initial_embed(node_embed)\n batch_size, num_res, _ = node_embed.shape\n edge_bias = torch.cat([\n torch.tile(node_embed[:, :, None, :], (1, 1, num_res, 1)),\n torch.tile(node_embed[:, None, :, :], (1, num_res, 1, 1)),\n ],\n axis=-1)\n edge_embed = torch.cat([edge_embed, edge_bias],\n axis=-1).reshape(batch_size * num_res**2, -1)\n edge_embed = self.final_layer(self.trunk(edge_embed) + edge_embed)\n edge_embed = self.layer_norm(edge_embed)\n edge_embed = edge_embed.reshape(batch_size, num_res, num_res, -1)\n return edge_embed"
},
{
"identifier": "make_s_init",
"path": "frame2seq/utils/featurize.py",
"snippet": "def make_s_init(self, X, input_S, seq_mask):\n \"\"\"\n Generate the initial sequence embedding.\n \"\"\"\n batch_size, seq_len, _, _ = X.shape\n seq_mask = seq_mask.to(X.device)\n input_S = input_S.to(X.device)\n\n def process_input_S(input_S):\n input_S_mask = torch.ones(input_S.shape[0], input_S.shape[1],\n 1).to(input_S.device)\n input_S_ints = torch.argmax(input_S, dim=-1)\n input_S_mask[input_S_ints == 20] = 0\n input_S = self.input_sequence_to_single(input_S)\n in_S = input_S * input_S_mask\n return in_S\n\n def absolute_positional_emb(seq_len, dim):\n \"\"\"\n Generate absolute positional embeddings.\n \"\"\"\n pe = torch.zeros(seq_len, dim)\n position = torch.arange(0, seq_len).unsqueeze(1)\n div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *\n -(math.log(10000.0) / dim)))\n pe[:, 0::2] = torch.sin(position.float() * div_term)\n pe[:, 1::2] = torch.cos(position.float() * div_term)\n return pe # (L, D)\n\n # code credit: https://github.com/jingraham/neurips19-graph-protein-design\n def _dihedrals(X, eps=1e-7):\n \"\"\"\n Compute dihedral angles from a set of coordinates.\n \"\"\"\n X = X[:, :, :3, :].reshape(X.shape[0], 3 * X.shape[1], 3)\n dX = X[:, 1:, :] - X[:, :-1, :]\n U = F.normalize(dX, dim=-1)\n u_2 = U[:, :-2, :]\n u_1 = U[:, 1:-1, :]\n u_0 = U[:, 2:, :]\n n_2 = F.normalize(torch.cross(u_2, u_1, dim=-1), dim=-1)\n n_1 = F.normalize(torch.cross(u_1, u_0, dim=-1), dim=-1)\n cosD = (n_2 * n_1).sum(-1)\n cosD = torch.clamp(cosD, -1 + eps, 1 - eps)\n D = torch.sign((u_2 * n_1).sum(-1)) * torch.acos(cosD)\n D = F.pad(D, (1, 2), 'constant', 0)\n D = D.view((D.size(0), int(D.size(1) / 3), 3))\n phi, psi, omega = torch.unbind(D, -1)\n D_features = torch.cat((torch.cos(D), torch.sin(D)), 2)\n return D_features # (B, L, 6), the 6 is cos(phi), sin(phi), cos(psi), sin(psi), cos(omega), sin(omega)\n\n in_S = process_input_S(input_S) # (B, L, D)\n in_S = in_S.to(X.device)\n d_feat = _dihedrals(X).float() # (B, L, 6)\n s_pos_emb = absolute_positional_emb(seq_len, self.single_dim)\n s_pos_emb = repeat(s_pos_emb, 'l d -> b l d', b=batch_size) # (B, L, D)\n s_pos_emb = s_pos_emb.to(X.device)\n s_init = torch.cat([s_pos_emb, d_feat], dim=-1) # (B, L, D+6)\n return s_init, in_S"
},
{
"identifier": "make_z_init",
"path": "frame2seq/utils/featurize.py",
"snippet": "def make_z_init(self, X):\n \"\"\"\n Generate the initial pairwise embedding.\n \"\"\"\n\n def relative_pairwise_position_idx(seq_len):\n \"\"\"\n Generate relative pairwise position indices.\n \"\"\"\n indices = torch.arange(seq_len, dtype=torch.long)\n indices = indices[:, None] - indices[None, :]\n indices = indices.clamp(-self.relpos_k, self.relpos_k)\n indices = indices + self.relpos_k\n return indices\n\n # code credit: https://github.com/jingraham/neurips19-graph-protein-design\n def rbf(D):\n \"\"\"\n Radial basis functions.\n \"\"\"\n device = D.device\n D_min, D_max, D_count = 0., self.dist_bins * self.dist_bin_width, self.dist_bins\n D_mu = torch.linspace(D_min, D_max, D_count, device=device)\n D_mu = D_mu.view([1, 1, 1, -1])\n D_sigma = (D_max - D_min) / D_count\n D_expand = torch.unsqueeze(D, -1)\n RBF = torch.exp(-((D_expand - D_mu) / D_sigma)**2)\n return RBF\n\n batch_size = X.shape[0]\n relpos = relative_pairwise_position_idx(X.shape[1])\n relpos = F.one_hot(relpos, 2 * self.relpos_k + 1).float()\n relpos = relpos.unsqueeze(0).repeat(batch_size, 1, 1, 1).to(X.device)\n X_bb = X[:, :, :4]\n X_bb = rearrange(X_bb, 'b n c d -> b (n c) d', c=4)\n pairwise_distances = torch.cdist(X_bb, X_bb)\n RBF = rbf(pairwise_distances)\n RBF = rearrange(RBF,\n \"b (n1 c1) (n2 c2) d -> b n1 n2 (c1 c2 d)\",\n c1=4,\n c2=4).to(X.device)\n z = torch.cat([RBF, relpos], dim=-1)\n z = z.float()\n\n return z"
}
] | import torch.nn as nn
import pytorch_lightning as pl
from torch.utils.checkpoint import checkpoint
from frame2seq.utils.rigid_utils import Rigid
from frame2seq.openfold.model.primitives import LayerNorm
from frame2seq.openfold.model.structure_module import InvariantPointAttention, StructureModuleTransition
from frame2seq.model.edge_update import EdgeTransition
from frame2seq.utils.featurize import make_s_init, make_z_init | 10,317 |
class frame2seq(pl.LightningModule):
def __init__(self, config):
super(frame2seq, self).__init__()
self.save_hyperparameters()
config = self.hparams.config
self.config = config
ipa_depth = config['ipa_depth']
ipa_dim = config['ipa_dim']
ipa_heads = config['ipa_heads']
ipa_pairwise_repr_dim = config['ipa_pairwise_repr_dim']
self.st_mod_tsit_factor = config['st_mod_tsit_factor']
self.sequence_dim = config['sequence_dim']
self.single_dim = config['single_dim']
self.torsion_bin_width = 8
self.torsion_bins = 360 // self.torsion_bin_width
self.relpos_k = 32
self.dist_bin_width = 0.5
self.dist_bins = 24
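# pair features = 4x4 backbone-atom distance RBFs (16 * dist_bins) plus one-hot relative positions (2 * relpos_k + 1); see make_z_init.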
self.pair_dim = 16 * self.dist_bins + 2 * self.relpos_k + 1
self.sequence_to_single = nn.Linear(6 + self.single_dim,
self.single_dim)
self.edge_to_pair = nn.Linear(self.pair_dim, ipa_pairwise_repr_dim)
self.single_to_sequence = nn.Linear(self.single_dim, self.sequence_dim)
self.layers = nn.ModuleList([])
for i in range(ipa_depth):
|
class frame2seq(pl.LightningModule):
def __init__(self, config):
super(frame2seq, self).__init__()
self.save_hyperparameters()
config = self.hparams.config
self.config = config
ipa_depth = config['ipa_depth']
ipa_dim = config['ipa_dim']
ipa_heads = config['ipa_heads']
ipa_pairwise_repr_dim = config['ipa_pairwise_repr_dim']
self.st_mod_tsit_factor = config['st_mod_tsit_factor']
self.sequence_dim = config['sequence_dim']
self.single_dim = config['single_dim']
self.torsion_bin_width = 8
self.torsion_bins = 360 // self.torsion_bin_width
self.relpos_k = 32
self.dist_bin_width = 0.5
self.dist_bins = 24
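# pair features = 4x4 backbone-atom distance RBFs (16 * dist_bins) plus one-hot relative positions (2 * relpos_k + 1); see make_z_init.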
self.pair_dim = 16 * self.dist_bins + 2 * self.relpos_k + 1
self.sequence_to_single = nn.Linear(6 + self.single_dim,
self.single_dim)
self.edge_to_pair = nn.Linear(self.pair_dim, ipa_pairwise_repr_dim)
self.single_to_sequence = nn.Linear(self.single_dim, self.sequence_dim)
self.layers = nn.ModuleList([])
for i in range(ipa_depth):
| ipa = InvariantPointAttention( | 2 | 2023-12-25 09:29:36+00:00 | 12k |
wwxu21/CUT | finetune_unlikelihood.py | [
{
"identifier": "LlamaForCausalLM",
"path": "modeling_llama_unlikelihood.py",
"snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config, threshold):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.threshold = threshold\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n weight_like: Optional[torch.Tensor] = None,\n weight_unlike: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n input_ids_neg=None,\n attention_mask_neg=None,\n labels_neg=None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n probs = torch.softmax(logits,dim=2)\n batch_size2, seq_length, hidden_size = probs.size()\n batch_size = batch_size2 // 2\n \n loss = None\n unlike_mask = weight_unlike.ne(-1).view(-1).to(probs.device)\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_probs_pos = probs[:batch_size][..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = NLLLoss()\n shift_probs_pos = shift_probs_pos.view(-1, self.config.vocab_size)\n shift_logits = torch.log(shift_probs_pos)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n \n loss = loss\n if unlike_mask.any():\n loss_unlike = self.unlikelihood(probs, labels, labels_neg, weight_unlike, unlike_mask)\n loss = (loss_unlike + loss) / 2 \n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n def unlikelihood(self, probs, labels, labels_neg, weight_unlike, unlike_mask):\n labels = labels.to(probs.device)\n labels_neg = labels_neg.to(probs.device)\n weight_unlike = weight_unlike.to(probs.device)\n shift_probs = probs[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n shift_labels_neg = labels_neg[..., 1:].contiguous()\n valid_indices = shift_labels[unlike_mask] != -100\n valid_indices_neg = shift_labels_neg[unlike_mask] != -100\n # assert (valid_indices == valid_indices_neg).all()\n batch_size2, seq_length, hidden_size = shift_probs.size()\n batch_size = batch_size2 // 2\n device = probs.device\n label_clamped = torch.clamp(shift_labels, min=0, max=hidden_size - 1) \n label_clamped_neg = torch.clamp(shift_labels_neg, min=0, max=hidden_size - 1)\n rows, cols = torch.meshgrid(torch.arange(batch_size, device=device), torch.arange(seq_length, device=device))\n probs_out = shift_probs[:batch_size][rows, cols, label_clamped][unlike_mask]\n probs_out_neg = shift_probs[batch_size:][rows, cols, label_clamped_neg][unlike_mask]\n valid_prob = probs_out[valid_indices]\n valid_prob_neg = probs_out_neg[valid_indices_neg]\n scale = (valid_prob / valid_prob_neg).detach()\n 
unlike_indices = scale > self.threshold # give some margins\n valid_prob_neg[unlike_indices] = 1 - valid_prob_neg[unlike_indices]\n valid_prob_neg[valid_prob_neg == 0] += 1e-5 # avoid 0\n valid_lprob_neg = torch.log(valid_prob_neg)\n valid_lprob_neg[unlike_indices] = weight_unlike[unlike_mask][0][0] * valid_lprob_neg[unlike_indices]\n valid_lprob_neg[~unlike_indices] = valid_lprob_neg[~unlike_indices]\n loss_unlike = -torch.sum(valid_lprob_neg)/ valid_lprob_neg.size(0)\n return loss_unlike\n\n \n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past"
},
{
"identifier": "PeftModelForCausalLM",
"path": "modeling_llama_unlikelihood.py",
"snippet": "class PeftModelForCausalLM(PeftModel):\n \"\"\"\n Peft model for causal language modeling.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForCausalLM\n >>> from peft import PeftModelForCausalLM, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"CAUSAL_LM\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 1280,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 20,\n ... \"num_layers\": 36,\n ... \"encoder_hidden_size\": 1280,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... }\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2-large\")\n >>> peft_model = PeftModelForCausalLM(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n input_ids_neg=None,\n attention_mask_neg=None,\n labels_neg=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n weight_like=None,\n weight_unlike=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n kwargs.update({'weight_like':weight_like, 'weight_unlike':weight_unlike, \"labels_neg\": labels_neg})\n input_ids = torch.cat([input_ids, input_ids_neg], dim=0)\n attention_mask = torch.cat([attention_mask, attention_mask_neg], dim=0)\n if not peft_config.is_prompt_learning:\n if self.base_model.config.model_type == \"mpt\":\n if inputs_embeds is not None:\n raise AssertionError(\"forward in MPTForCausalLM does not support inputs_embeds\")\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n batch_size = _get_batch_size(input_ids, inputs_embeds)\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\"Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids\")\n kwargs[\"token_type_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size)\n return self.base_model(\n input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs\n )\n else:\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n # concat prompt labels\n if labels is not None:\n prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)\n kwargs[\"labels\"] = torch.cat((prefix_labels, labels), dim=1)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def generate(self, **kwargs):\n self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation\n if hasattr(self.base_model, \"model\"):\n self.base_model.model.generation_config = self.generation_config\n else:\n self.base_model.generation_config = self.generation_config\n try:\n outputs = self.base_model.generate(**kwargs)\n except:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n raise\n else:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n return outputs\n\n def prepare_inputs_for_generation(self, *args, **kwargs):\n peft_config = self.active_peft_config\n model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)\n if peft_config.is_prompt_learning:\n if model_kwargs.get(\"attention_mask\", None) is not None:\n prefix_attention_mask = torch.ones(\n model_kwargs[\"input_ids\"].shape[0], peft_config.num_virtual_tokens\n ).to(model_kwargs[\"input_ids\"].device)\n model_kwargs[\"attention_mask\"] = torch.cat(\n (prefix_attention_mask, model_kwargs[\"attention_mask\"]), dim=1\n )\n\n if model_kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n model_kwargs[\"position_ids\"] = None\n\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\n \"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids\"\n )\n kwargs[\"token_type_ids\"] = None\n\n if model_kwargs[\"past_key_values\"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n model_kwargs[\"past_key_values\"] = past_key_values\n else:\n if model_kwargs[\"past_key_values\"] is None:\n inputs_embeds = self.word_embeddings(model_kwargs[\"input_ids\"])\n prompts = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n prompts = prompts.to(inputs_embeds.dtype)\n model_kwargs[\"inputs_embeds\"] = torch.cat((prompts, inputs_embeds), dim=1)\n model_kwargs[\"input_ids\"] = None\n\n return model_kwargs"
},
{
"identifier": "Prompter",
"path": "prompter.py",
"snippet": "class Prompter(object):\n __slots__ = (\"template\", \"_verbose\")\n\n def __init__(self, template_name: str = \"\", verbose: bool = False):\n self._verbose = verbose\n if not template_name:\n # Enforce the default here, so the constructor can be called with '' and will not break.\n template_name = \"alpaca\"\n file_name = osp.join(\"templates\", f\"{template_name}.json\")\n if not osp.exists(file_name):\n raise ValueError(f\"Can't read {file_name}\")\n with open(file_name) as fp:\n self.template = json.load(fp)\n if self._verbose:\n print(\n f\"Using prompt template {template_name}: {self.template['description']}\"\n )\n\n def generate_prompt(\n self,\n data_point,\n output=False,\n ) -> str:\n # returns the full prompt from instruction and optional input\n # if a label (=response, =output) is provided, it's also appended.\n instruction = data_point['instruction']\n label = data_point['output']\n res = instruction\n if output:\n res = f\"{res}{label}\"\n if self._verbose:\n print(res)\n return res\n\n def get_response(self, output: str) -> str:\n return output.split(self.template[\"response_split\"])[1].strip()"
}
] | import os
import sys
import json
import fire
import torch
import transformers
import numpy as np
import random
from typing import List
from torch.utils.data import DataLoader
from datasets import load_dataset, concatenate_datasets, Dataset
from transformers import TrainerCallback, TrainingArguments, TrainerState, TrainerControl
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from peft import (
LoraConfig,
prepare_model_for_int8_training,
set_peft_model_state_dict,
MODEL_TYPE_TO_PEFT_MODEL_MAPPING,
PeftModel,
)
from peft.utils import _prepare_prompt_learning_config
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy
from transformers import LlamaTokenizer, LlamaConfig
from modeling_llama_unlikelihood import LlamaForCausalLM, PeftModelForCausalLM
from prompter import Prompter
from typing import Optional, Union, Any
from dataclasses import dataclass | 7,617 | set_peft_model_state_dict(model, adapters_weights)
return control
def get_peft_model(model, peft_config, adapter_name: str = "default"):
"""
Returns a Peft model object from a model and a config.
Args:
model ([`transformers.PreTrainedModel`]): Model to be wrapped.
peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model.
"""
model_config = getattr(model, "config", {"model_type": "custom"})
if hasattr(model_config, "to_dict"):
model_config = model_config.to_dict()
peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
return PeftModel(model, peft_config, adapter_name=adapter_name)
if peft_config.is_prompt_learning:
peft_config = _prepare_prompt_learning_config(peft_config, model_config)
return PeftModelForCausalLM(model, peft_config, adapter_name=adapter_name)
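# A minimal usage sketch of the helper above (not from the original script): assuming
# `base_llama_model` is a LlamaForCausalLM that has already been loaded, the LoRA values below
# simply mirror the defaults of train() further down and are illustrative only.
example_lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    target_modules=["gate_proj", "down_proj", "up_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
example_peft_model = get_peft_model(base_llama_model, example_lora_config)
example_peft_model.print_trainable_parameters()  # reports trainable vs. total parameters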
def train(
# model/data params
base_model: str = "",
data_path: str = "",
output_dir: str = "",
# training hyperparams
batch_size: int = 128,
micro_batch_size: int = 8,
num_epochs: int = 1,
learning_rate: float = 3e-4,
cutoff_len: int = 4096,
val_set_size: int = 0,
lr_scheduler: str = "cosine",
warmup_steps: int = 100,
# lora hyperparams
lora_r: int = 16,
lora_alpha: int = 16,
lora_dropout: float = 0.05,
# from peft docs: ["q_proj", "k_proj", "v_proj", "o_proj", "fc_in", "fc_out", "wte", "gate_proj", "down_proj", "up_proj"]
lora_target_modules: List[str] = ["gate_proj", "down_proj", "up_proj"],
# llm hyperparams
train_on_inputs: bool = False, # if False, masks out inputs in loss
add_eos_token: bool = False,
group_by_length: bool = False, # faster, but produces an odd training loss curve
# wandb params
wandb_project: str = "",
wandb_run_name: str = "",
wandb_watch: str = "", # options: false | gradients | all
wandb_log_model: str = "", # options: false | true
resume_from_checkpoint: str = None, # either training checkpoint or final adapter
prompt_template_name: str = "alpaca",
weight_unlike: float = 0.1,
threshold: float = 1.1,
downsample: float = -1,
debug: bool = False,
):
if int(os.environ.get("LOCAL_RANK", 0)) == 0:
print(
f"Params using prompt template {prompt_template_name}\n"
f"the unlikelihood weight for the incorrect token in the incorrect response: {weight_unlike}\n"
f"the threshold to determine the unlikelihood token: {threshold}\n"
f"downssample rate for Hindsight-P: {downsample}\n"
f"base_model: {base_model}\n"
f"data_path: {data_path}\n"
f"output_dir: {output_dir}\n"
f"batch_size: {batch_size}\n"
f"micro_batch_size: {micro_batch_size}\n"
f"num_epochs: {num_epochs}\n"
f"learning_rate: {learning_rate}\n"
f"cutoff_len: {cutoff_len}\n"
f"val_set_size: {val_set_size}\n"
f"lr_scheduler: {lr_scheduler}\n"
f"warmup_steps: {warmup_steps}\n"
f"lora_r: {lora_r}\n"
f"lora_alpha: {lora_alpha}\n"
f"lora_dropout: {lora_dropout}\n"
f"lora_target_modules: {lora_target_modules}\n"
f"train_on_inputs: {train_on_inputs}\n"
f"add_eos_token: {add_eos_token}\n"
f"group_by_length: {group_by_length}\n"
f"wandb_project: {wandb_project}\n"
f"wandb_run_name: {wandb_run_name}\n"
f"wandb_watch: {wandb_watch}\n"
f"wandb_log_model: {wandb_log_model}\n"
f"resume_from_checkpoint: {resume_from_checkpoint or False}\n"
)
assert (
base_model
), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
gradient_accumulation_steps = batch_size // micro_batch_size
prompter = Prompter(prompt_template_name)
if not debug:
device_map = "auto"
else:
device_map = "cpu"
world_size = int(os.environ.get("WORLD_SIZE", 1))
local_rank = int(os.environ.get("LOCAL_RANK", 0))
ddp = world_size != 1
if ddp:
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
gradient_accumulation_steps = gradient_accumulation_steps // world_size
print("gradient_accumulation_steps: ", gradient_accumulation_steps)
# Check if parameter passed or if set within environ
use_wandb = len(wandb_project) > 0 or (
"WANDB_PROJECT" in os.environ and len(os.environ["WANDB_PROJECT"]) > 0
)
use_wandb = False
# Only overwrite environ if wandb param passed
if len(wandb_project) > 0:
os.environ["WANDB_PROJECT"] = wandb_project
if len(wandb_watch) > 0:
os.environ["WANDB_WATCH"] = wandb_watch
if len(wandb_log_model) > 0:
os.environ["WANDB_LOG_MODEL"] = wandb_log_model
if not debug:
| seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
@dataclass
class MyDataCollator:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
model ([`PreTrainedModel`]):
The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
prepare the *decoder_input_ids*
This is useful when using *label_smoothing* to avoid calculating loss twice.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def __call__(self, features, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
labels_neg = [feature["labels_neg"] for feature in features] if "labels_neg" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
if labels_neg is not None:
max_label_length_neg = max(len(l) for l in labels_neg)
max_label_length = max(max_label_length, max_label_length_neg)
if self.pad_to_multiple_of is not None:
max_label_length = (
(max_label_length + self.pad_to_multiple_of - 1)
// self.pad_to_multiple_of
* self.pad_to_multiple_of
)
# self.tokenizer.padding_side = "left"
padding_side = self.tokenizer.padding_side
for feature in features:
feature['weight_like'] = [feature['weight_like']]
feature['weight_unlike'] = [feature['weight_unlike']]
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
remainder_length = max_label_length - len(feature["labels_neg"])
remainder_label = [self.label_pad_token_id] * remainder_length
remainder_ids = [self.tokenizer.pad_token_id] * remainder_length
remainder_mask = [0] * remainder_length
if isinstance(feature["labels"], list):
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
feature["labels_neg"] = (
feature["labels_neg"] + remainder_label if padding_side == "right" else remainder_label + feature["labels_neg"]
)
feature["input_ids_neg"] = (
feature["input_ids_neg"] + remainder_ids if padding_side == "right" else remainder_ids + feature["input_ids_neg"]
)
feature["attention_mask_neg"] = (
feature["attention_mask_neg"] + remainder_mask if padding_side == "right" else remainder_mask + feature["attention_mask_neg"]
)
elif padding_side == "right":
feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
feature["labels_neg"] = np.concatenate([feature["labels_neg"], remainder_label]).astype(np.int64)
feature["input_ids_neg"] = np.concatenate([feature["input_ids_neg"], remainder_ids]).astype(np.int64)
feature["attention_mask_neg"] = np.concatenate([feature["attention_mask_neg"], remainder_mask]).astype(np.int64)
else:
feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
feature["labels_neg"] = np.concatenate([remainder_label, feature["labels_neg"]]).astype(np.int64)
feature["input_ids_neg"] = np.concatenate([remainder_ids, feature["input_ids_neg"]]).astype(np.int64)
feature["attention_mask_neg"] = np.concatenate([remainder_mask, feature["attention_mask_neg"]]).astype(np.int64)
features = self.tokenizer.pad(
features,
padding=self.padding,
max_length=max_label_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=return_tensors,
)
# prepare decoder_input_ids
if (
labels is not None
and self.model is not None
and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
features["decoder_input_ids"] = decoder_input_ids
return features
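# A minimal sketch (not part of the original script) showing how MyDataCollator could be wired
# into a DataLoader; `tokenizer` and `train_data` are assumed to be the LlamaTokenizer and the
# tokenized dataset that train() prepares elsewhere.
example_collator = MyDataCollator(
    tokenizer=tokenizer,
    padding=True,
    pad_to_multiple_of=8,
    label_pad_token_id=-100,
    return_tensors="pt",
)
example_loader = DataLoader(train_data, batch_size=micro_batch_size, collate_fn=example_collator)
example_batch = next(iter(example_loader))  # includes input_ids/labels and their *_neg counterparts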
class SavePeftModelCallback(TrainerCallback):
def on_save(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
kwargs["model"].save_pretrained(checkpoint_folder)
pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin")
torch.save({}, pytorch_model_path)
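# saving an empty dict here keeps each checkpoint from storing the full base-model weights;
# only the adapter written by save_pretrained() above is kept on disk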
return control
class LoadBestPeftModelCallback(TrainerCallback):
def on_train_end(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
print(f"Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).")
best_model_path = os.path.join(state.best_model_checkpoint, "adapter_model.bin")
adapters_weights = torch.load(best_model_path)
model = kwargs["model"]
set_peft_model_state_dict(model, adapters_weights)
return control
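# A minimal sketch (not part of the original script) of how the two callbacks above are usually
# registered; `model`, `train_data`, `tokenizer` and `trainer_args` are placeholders for objects
# that train() builds later.
example_trainer = transformers.Trainer(
    model=model,
    args=trainer_args,
    train_dataset=train_data,
    data_collator=MyDataCollator(tokenizer=tokenizer, return_tensors="pt"),
    callbacks=[SavePeftModelCallback(), LoadBestPeftModelCallback()],
)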
def get_peft_model(model, peft_config, adapter_name: str = "default"):
"""
Returns a Peft model object from a model and a config.
Args:
model ([`transformers.PreTrainedModel`]): Model to be wrapped.
peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model.
"""
model_config = getattr(model, "config", {"model_type": "custom"})
if hasattr(model_config, "to_dict"):
model_config = model_config.to_dict()
peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
return PeftModel(model, peft_config, adapter_name=adapter_name)
if peft_config.is_prompt_learning:
peft_config = _prepare_prompt_learning_config(peft_config, model_config)
return PeftModelForCausalLM(model, peft_config, adapter_name=adapter_name)
def train(
# model/data params
base_model: str = "",
data_path: str = "",
output_dir: str = "",
# training hyperparams
batch_size: int = 128,
micro_batch_size: int = 8,
num_epochs: int = 1,
learning_rate: float = 3e-4,
cutoff_len: int = 4096,
val_set_size: int = 0,
lr_scheduler: str = "cosine",
warmup_steps: int = 100,
# lora hyperparams
lora_r: int = 16,
lora_alpha: int = 16,
lora_dropout: float = 0.05,
# from peft docs: ["q_proj", "k_proj", "v_proj", "o_proj", "fc_in", "fc_out", "wte", "gate_proj", "down_proj", "up_proj"]
lora_target_modules: List[str] = ["gate_proj", "down_proj", "up_proj"],
# llm hyperparams
train_on_inputs: bool = False, # if False, masks out inputs in loss
add_eos_token: bool = False,
group_by_length: bool = False, # faster, but produces an odd training loss curve
# wandb params
wandb_project: str = "",
wandb_run_name: str = "",
wandb_watch: str = "", # options: false | gradients | all
wandb_log_model: str = "", # options: false | true
resume_from_checkpoint: str = None, # either training checkpoint or final adapter
prompt_template_name: str = "alpaca",
weight_unlike: float = 0.1,
threshold: float = 1.1,
downsample: float = -1,
debug: bool = False,
):
if int(os.environ.get("LOCAL_RANK", 0)) == 0:
print(
f"Params using prompt template {prompt_template_name}\n"
f"the unlikelihood weight for the incorrect token in the incorrect response: {weight_unlike}\n"
f"the threshold to determine the unlikelihood token: {threshold}\n"
f"downssample rate for Hindsight-P: {downsample}\n"
f"base_model: {base_model}\n"
f"data_path: {data_path}\n"
f"output_dir: {output_dir}\n"
f"batch_size: {batch_size}\n"
f"micro_batch_size: {micro_batch_size}\n"
f"num_epochs: {num_epochs}\n"
f"learning_rate: {learning_rate}\n"
f"cutoff_len: {cutoff_len}\n"
f"val_set_size: {val_set_size}\n"
f"lr_scheduler: {lr_scheduler}\n"
f"warmup_steps: {warmup_steps}\n"
f"lora_r: {lora_r}\n"
f"lora_alpha: {lora_alpha}\n"
f"lora_dropout: {lora_dropout}\n"
f"lora_target_modules: {lora_target_modules}\n"
f"train_on_inputs: {train_on_inputs}\n"
f"add_eos_token: {add_eos_token}\n"
f"group_by_length: {group_by_length}\n"
f"wandb_project: {wandb_project}\n"
f"wandb_run_name: {wandb_run_name}\n"
f"wandb_watch: {wandb_watch}\n"
f"wandb_log_model: {wandb_log_model}\n"
f"resume_from_checkpoint: {resume_from_checkpoint or False}\n"
)
assert (
base_model
), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
gradient_accumulation_steps = batch_size // micro_batch_size
prompter = Prompter(prompt_template_name)
if not debug:
device_map = "auto"
else:
device_map = "cpu"
world_size = int(os.environ.get("WORLD_SIZE", 1))
local_rank = int(os.environ.get("LOCAL_RANK", 0))
ddp = world_size != 1
if ddp:
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
gradient_accumulation_steps = gradient_accumulation_steps // world_size
print("gradient_accumulation_steps: ", gradient_accumulation_steps)
# Check if parameter passed or if set within environ
use_wandb = len(wandb_project) > 0 or (
"WANDB_PROJECT" in os.environ and len(os.environ["WANDB_PROJECT"]) > 0
)
use_wandb = False
# Only overwrite environ if wandb param passed
if len(wandb_project) > 0:
os.environ["WANDB_PROJECT"] = wandb_project
if len(wandb_watch) > 0:
os.environ["WANDB_WATCH"] = wandb_watch
if len(wandb_log_model) > 0:
os.environ["WANDB_LOG_MODEL"] = wandb_log_model
if not debug: | model = LlamaForCausalLM.from_pretrained( | 0 | 2023-12-22 07:32:19+00:00 | 12k |
usail-hkust/LLMTSCS | utils/oneline.py | [
{
"identifier": "DIC_AGENTS",
"path": "utils/config.py",
"snippet": "DIC_AGENTS = {\n \"Random\": RandomAgent,\n \"Fixedtime\": FixedtimeAgent,\n \"MaxPressure\": MaxPressureAgent,\n \"EfficientMaxPressure\": EfficientMaxPressureAgent,\n \"AdvancedMaxPressure\": AdvancedMaxPressureAgent,\n\n \"EfficientPressLight\": PressLightAgentOne,\n \"EfficientColight\": CoLightAgent,\n \"EfficientMPLight\": MPLightAgent,\n \"MPLight\": MPLightAgent,\n \"Colight\": CoLightAgent,\n\n \"AdvancedMPLight\": AdvancedMPLightAgent,\n \"AdvancedColight\": CoLightAgent,\n \"AdvancedDQN\": SimpleDQNAgentOne,\n \"Attend\": AttendLightAgent,\n \"ChatGPTTLCSWaitTimeForecast\": ChatGPTTLCS_Wait_Time_Forecast,\n \"ChatGPTTLCSCommonsense\": ChatGPTTLCS_Commonsense,\n \"ChatGPTTLCSCommonsenseFlowCoordination\": ChatGPTTLCS_Commonsense_Flow_Coordination,\n \"ChatGPTTLCSWaitTimeForecastCode\": ChatGPTTLCS_Wait_Time_Forecast_Code,\n \"ChatGPTTLCSCommonsenseCode\": ChatGPTTLCS_Commonsense_Code,\n \"ChatGPTTLCSCommonsenseFlowCoordinationCode\": ChatGPTTLCS_Commonsense_Flow_Coordination_Code,\n \"ChatGPTTLCSZeroKnowledge\": ChatGPTTLCS_Zero_Knowledge,\n \"ChatGPTTLCSZeroKnowledgeCode\": ChatGPTTLCS_Zero_Knowledge_Code,\n \"LLMTLCSWaitTimeForecast\": LLM_TLCS_Wait_Time_Forecast,\n \"LLMTLCSCommonsense\": LLM_TLCS_Commonsense,\n}"
},
{
"identifier": "merge",
"path": "utils/my_utils.py",
"snippet": "def merge(dic_tmp, dic_to_change):\r\ndef load_json(file):\r\ndef dump_json(data, file, indent=None):\r\ndef calculate_road_length(road_points):\r\ndef get_state(roads, env):\r\ndef get_state_detail(roads, env):\r\ndef get_state_three_segment(roads, env):\r\ndef trans_prompt_llama(message, chat_history, system_prompt):\r"
},
{
"identifier": "CityFlowEnv",
"path": "utils/cityflow_env.py",
"snippet": "class CityFlowEnv:\n\n def __init__(self, path_to_log, path_to_work_directory, dic_traffic_env_conf, dic_path):\n self.path_to_log = path_to_log\n self.path_to_work_directory = path_to_work_directory\n self.dic_traffic_env_conf = dic_traffic_env_conf\n self.dic_path = dic_path\n\n self.current_time = None\n self.id_to_index = None\n self.traffic_light_node_dict = None\n self.intersection_dict = None\n self.eng = None\n self.list_intersection = None\n self.list_inter_log = None\n self.list_lanes = None\n self.system_states = None\n self.lane_length = None\n self.waiting_vehicle_list = {}\n\n # check min action time\n if self.dic_traffic_env_conf[\"MIN_ACTION_TIME\"] <= self.dic_traffic_env_conf[\"YELLOW_TIME\"]:\n \"\"\" include the yellow time in action time \"\"\"\n print(\"MIN_ACTION_TIME should include YELLOW_TIME\")\n sys.exit()\n\n # touch new inter_{}.pkl (if exists, remove)\n for inter_ind in range(self.dic_traffic_env_conf[\"NUM_INTERSECTIONS\"]):\n path_to_log_file = os.path.join(self.path_to_log, \"inter_{0}.pkl\".format(inter_ind))\n f = open(path_to_log_file, \"wb\")\n f.close()\n\n def reset(self):\n print(\" ============= self.eng.reset() to be implemented ==========\")\n if not os.path.isdir(\"./frontend/web\"):\n os.mkdir(\"./frontend/web\")\n cityflow_config = {\n \"interval\": self.dic_traffic_env_conf[\"INTERVAL\"],\n \"seed\": int(np.random.randint(0, 100)),\n \"laneChange\": True,\n \"dir\": self.path_to_work_directory+\"/\",\n \"roadnetFile\": self.dic_traffic_env_conf[\"ROADNET_FILE\"],\n \"flowFile\": self.dic_traffic_env_conf[\"TRAFFIC_FILE\"],\n \"rlTrafficLight\": True,\n \"saveReplay\": True, # if \"GPT\" in self.dic_traffic_env_conf[\"MODEL_NAME\"] or \"llm\" in self.dic_traffic_env_conf[\"MODEL_NAME\"] else False,\n \"roadnetLogFile\": f\"../../../frontend/web/{self.dic_traffic_env_conf['ROADNET_FILE']}-{self.dic_traffic_env_conf['TRAFFIC_FILE']}-{self.dic_traffic_env_conf['MODEL_NAME']}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases-roadnetLogFile.json\",\n \"replayLogFile\": f\"../../../frontend/web/{self.dic_traffic_env_conf['ROADNET_FILE']}-{self.dic_traffic_env_conf['TRAFFIC_FILE']}-{self.dic_traffic_env_conf['MODEL_NAME']}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases-replayLogFile.txt\"\n }\n # print(cityflow_config)\n with open(os.path.join(self.path_to_work_directory, \"cityflow.config\"), \"w\") as json_file:\n json.dump(cityflow_config, json_file)\n\n self.eng = engine.Engine(os.path.join(self.path_to_work_directory, \"cityflow.config\"), thread_num=1)\n\n # get adjacency\n self.traffic_light_node_dict = self._adjacency_extraction()\n\n # get lane length\n _, self.lane_length = self.get_lane_length()\n\n # initialize intersections (grid)\n self.list_intersection = [Intersection((i+1, j+1), self.dic_traffic_env_conf, self.eng,\n self.traffic_light_node_dict[\"intersection_{0}_{1}\".format(i+1, j+1)],\n self.path_to_log,\n self.lane_length)\n for i in range(self.dic_traffic_env_conf[\"NUM_COL\"])\n for j in range(self.dic_traffic_env_conf[\"NUM_ROW\"])]\n self.list_inter_log = [[] for _ in range(self.dic_traffic_env_conf[\"NUM_COL\"] *\n self.dic_traffic_env_conf[\"NUM_ROW\"])]\n\n self.id_to_index = {}\n count = 0\n for i in range(self.dic_traffic_env_conf[\"NUM_COL\"]):\n for j in range(self.dic_traffic_env_conf[\"NUM_ROW\"]):\n self.id_to_index[\"intersection_{0}_{1}\".format(i+1, j+1)] = count\n count += 1\n\n self.list_lanes = []\n for inter in self.list_intersection:\n self.list_lanes += inter.list_lanes\n self.list_lanes = 
np.unique(self.list_lanes).tolist()\n\n # get new measurements\n self.system_states = {\"get_lane_vehicles\": self.eng.get_lane_vehicles(),\n \"get_lane_waiting_vehicle_count\": self.eng.get_lane_waiting_vehicle_count(),\n \"get_vehicle_speed\": self.eng.get_vehicle_speed(),\n \"get_vehicle_distance\": self.eng.get_vehicle_distance(),\n }\n\n for inter in self.list_intersection:\n inter.update_current_measurements(self.system_states)\n state, done = self.get_state()\n\n # create roadnet dict\n if self.intersection_dict is None:\n self.create_intersection_dict()\n\n return state\n\n\n def create_intersection_dict(self):\n roadnet = load_json(f'./{self.dic_path[\"PATH_TO_DATA\"]}/{self.dic_traffic_env_conf[\"ROADNET_FILE\"]}')\n\n intersections_raw = roadnet[\"intersections\"]\n roads_raw = roadnet[\"roads\"]\n\n agent_intersections = {}\n\n # init agent intersections\n for i, inter in enumerate(intersections_raw):\n inter_id = inter[\"id\"]\n intersection = None\n for env_inter in self.list_intersection:\n if env_inter.inter_name == inter_id:\n intersection = env_inter\n break\n\n if len(inter['roadLinks']) > 0:\n # collect yellow allowed road links\n yellow_time = None\n phases = inter['trafficLight']['lightphases']\n all_sets = []\n yellow_phase_idx = None\n for p_i, p in enumerate(phases):\n all_sets.append(set(p['availableRoadLinks']))\n if p[\"time\"] < 30:\n yellow_phase_idx = p_i\n yellow_time = p[\"time\"]\n yellow_allowed_links = reduce(lambda x, y: x & y, all_sets)\n\n # init intersection\n agent_intersections[inter_id] = {\"phases\": {\"Y\": {\"time\": yellow_time, \"idx\": yellow_phase_idx}},\n \"roads\": {}}\n\n # init roads\n roads = {}\n for r in inter[\"roads\"]:\n roads[r] = {\"location\": None, \"type\": \"incoming\", \"go_straight\": None, \"turn_left\": None,\n \"turn_right\": None, \"length\": None, \"max_speed\": None,\n \"lanes\": {\"go_straight\": [], \"turn_left\": [], \"turn_right\": []}}\n\n # collect road length speed info & init road location\n road_links = inter[\"roadLinks\"]\n for r in roads_raw:\n r_id = r[\"id\"]\n if r_id in roads:\n roads[r_id][\"length\"] = calculate_road_length(r[\"points\"])\n roads[r_id][\"max_speed\"] = r[\"lanes\"][0][\"maxSpeed\"]\n for env_road_location in intersection.dic_entering_approach_to_edge:\n if intersection.dic_entering_approach_to_edge[env_road_location] == r_id:\n roads[r_id][\"location\"] = location_dict_reverse[env_road_location]\n break\n for env_road_location in intersection.dic_exiting_approach_to_edge:\n if intersection.dic_exiting_approach_to_edge[env_road_location] == r_id:\n roads[r_id][\"location\"] = location_dict_reverse[env_road_location]\n break\n\n # collect signal phase info\n for p_idx, p in enumerate(phases):\n other_allowed_links = set(p['availableRoadLinks']) - yellow_allowed_links\n if len(other_allowed_links) > 0:\n allowed_directions = []\n for l_idx in other_allowed_links:\n link = road_links[l_idx]\n location = roads[link[\"startRoad\"]][\"location\"]\n direction = link[\"type\"]\n allowed_directions.append(f\"{location_dict[location]}{direction_dict[direction]}\")\n allowed_directions = sorted(allowed_directions)\n allowed_directions = f\"{allowed_directions[0]}{allowed_directions[1]}\"\n agent_intersections[inter_id][\"phases\"][allowed_directions] = {\"time\": p[\"time\"], \"idx\": p_idx}\n\n # collect location type direction info\n for r_link in road_links:\n start = r_link['startRoad']\n end = r_link['endRoad']\n lane_links = r_link['laneLinks']\n\n for r in roads:\n if r != start:\n 
continue\n # collect type\n roads[r][\"type\"] = \"outgoing\"\n\n # collect directions\n if r_link[\"type\"] == \"go_straight\":\n roads[r][\"go_straight\"] = end\n\n # collect lane info\n for l_link in lane_links:\n lane_id = l_link['startLaneIndex']\n if lane_id not in roads[r][\"lanes\"][\"go_straight\"]:\n roads[r][\"lanes\"][\"go_straight\"].append(lane_id)\n\n elif r_link[\"type\"] == \"turn_left\":\n roads[r][\"turn_left\"] = end\n\n # collect lane info\n for l_link in lane_links:\n lane_id = l_link['startLaneIndex']\n if lane_id not in roads[r][\"lanes\"][\"turn_left\"]:\n roads[r][\"lanes\"][\"turn_left\"].append(lane_id)\n\n elif r_link[\"type\"] == \"turn_right\":\n roads[r][\"turn_right\"] = end\n\n # collect lane info\n for l_link in lane_links:\n lane_id = l_link['startLaneIndex']\n if lane_id not in roads[r][\"lanes\"][\"turn_right\"]:\n roads[r][\"lanes\"][\"turn_right\"].append(lane_id)\n\n agent_intersections[inter_id][\"roads\"] = roads\n\n self.intersection_dict = agent_intersections\n\n def step(self, action):\n\n step_start_time = time.time()\n\n list_action_in_sec = [action]\n list_action_in_sec_display = [action]\n for i in range(self.dic_traffic_env_conf[\"MIN_ACTION_TIME\"]-1):\n if self.dic_traffic_env_conf[\"ACTION_PATTERN\"] == \"switch\":\n list_action_in_sec.append(np.zeros_like(action).tolist())\n elif self.dic_traffic_env_conf[\"ACTION_PATTERN\"] == \"set\":\n list_action_in_sec.append(np.copy(action).tolist())\n list_action_in_sec_display.append(np.full_like(action, fill_value=-1).tolist())\n\n average_reward_action_list = [0]*len(action)\n for i in range(self.dic_traffic_env_conf[\"MIN_ACTION_TIME\"]):\n\n action_in_sec = list_action_in_sec[i]\n action_in_sec_display = list_action_in_sec_display[i]\n\n instant_time = self.get_current_time()\n self.current_time = self.get_current_time()\n\n before_action_feature = self.get_feature()\n # state = self.get_state()\n\n if i == 0:\n print(\"time: {0}\".format(instant_time))\n \n self._inner_step(action_in_sec)\n\n # get reward\n reward = self.get_reward()\n for j in range(len(reward)):\n average_reward_action_list[j] = (average_reward_action_list[j] * i + reward[j]) / (i + 1)\n self.log(cur_time=instant_time, before_action_feature=before_action_feature, action=action_in_sec_display)\n next_state, done = self.get_state()\n\n print(\"Step time: \", time.time() - step_start_time)\n return next_state, reward, done, average_reward_action_list\n\n def _inner_step(self, action):\n # copy current measurements to previous measurements\n for inter in self.list_intersection:\n inter.update_previous_measurements()\n # set signals\n # multi_intersection decided by action {inter_id: phase}\n for inter_ind, inter in enumerate(self.list_intersection):\n inter.set_signal(\n action=action[inter_ind],\n action_pattern=self.dic_traffic_env_conf[\"ACTION_PATTERN\"],\n yellow_time=self.dic_traffic_env_conf[\"YELLOW_TIME\"],\n path_to_log=self.path_to_log\n )\n\n # run one step\n for i in range(int(1/self.dic_traffic_env_conf[\"INTERVAL\"])):\n self.eng.next_step()\n\n # update queuing vehicle info\n vehicle_ids = self.eng.get_vehicles(include_waiting=False)\n for v_id in vehicle_ids:\n v_info = self.eng.get_vehicle_info(v_id)\n speed = float(v_info[\"speed\"])\n if speed < 0.1:\n if v_id not in self.waiting_vehicle_list:\n self.waiting_vehicle_list[v_id] = {\"time\": None, \"link\": None}\n self.waiting_vehicle_list[v_id][\"time\"] = self.dic_traffic_env_conf[\"INTERVAL\"]\n self.waiting_vehicle_list[v_id][\"link\"] = 
v_info['drivable']\n else:\n if self.waiting_vehicle_list[v_id][\"link\"] != v_info['drivable']:\n self.waiting_vehicle_list[v_id] = {\"time\": None, \"link\": None}\n self.waiting_vehicle_list[v_id][\"time\"] = self.dic_traffic_env_conf[\"INTERVAL\"]\n self.waiting_vehicle_list[v_id][\"link\"] = v_info['drivable']\n else:\n self.waiting_vehicle_list[v_id][\"time\"] += self.dic_traffic_env_conf[\"INTERVAL\"]\n else:\n if v_id in self.waiting_vehicle_list:\n self.waiting_vehicle_list.pop(v_id)\n\n if v_id in self.waiting_vehicle_list and self.waiting_vehicle_list[v_id][\"link\"] != v_info['drivable']:\n self.waiting_vehicle_list.pop(v_id)\n\n self.system_states = {\"get_lane_vehicles\": self.eng.get_lane_vehicles(),\n \"get_lane_waiting_vehicle_count\": self.eng.get_lane_waiting_vehicle_count(),\n \"get_vehicle_speed\": self.eng.get_vehicle_speed(),\n \"get_vehicle_distance\": self.eng.get_vehicle_distance()\n }\n\n for inter in self.list_intersection:\n inter.update_current_measurements(self.system_states)\n\n def get_feature(self):\n list_feature = [inter.get_feature() for inter in self.list_intersection]\n return list_feature\n\n def get_state(self, list_state_feature=None):\n if list_state_feature is not None:\n list_state = [inter.get_state(list_state_feature) for inter in self.list_intersection]\n done = False\n else:\n list_state = [inter.get_state(self.dic_traffic_env_conf[\"LIST_STATE_FEATURE\"]) for inter in self.list_intersection]\n done = False\n return list_state, done\n\n def get_reward(self):\n list_reward = [inter.get_reward(self.dic_traffic_env_conf[\"DIC_REWARD_INFO\"]) for inter in self.list_intersection]\n return list_reward\n\n def get_current_time(self):\n return self.eng.get_current_time()\n\n def log(self, cur_time, before_action_feature, action):\n\n for inter_ind in range(len(self.list_intersection)):\n self.list_inter_log[inter_ind].append({\"time\": cur_time,\n \"state\": before_action_feature[inter_ind],\n \"action\": action[inter_ind]})\n\n def batch_log_2(self):\n \"\"\"\n Used for model test, only log the vehicle_inter_.csv\n \"\"\"\n for inter_ind in range(self.dic_traffic_env_conf[\"NUM_INTERSECTIONS\"]):\n # changed from origin\n if int(inter_ind) % 100 == 0:\n print(\"Batch log for inter \", inter_ind)\n path_to_log_file = os.path.join(self.path_to_log, \"vehicle_inter_{0}.csv\".format(inter_ind))\n dic_vehicle = self.list_intersection[inter_ind].get_dic_vehicle_arrive_leave_time()\n df = pd.DataFrame.from_dict(dic_vehicle, orient=\"index\")\n df.to_csv(path_to_log_file, na_rep=\"nan\")\n\n def batch_log(self, start, stop):\n for inter_ind in range(start, stop):\n # changed from origin\n if int(inter_ind) % 100 == 0:\n print(\"Batch log for inter \", inter_ind)\n path_to_log_file = os.path.join(self.path_to_log, \"vehicle_inter_{0}.csv\".format(inter_ind))\n dic_vehicle = self.list_intersection[inter_ind].get_dic_vehicle_arrive_leave_time()\n df = pd.DataFrame.from_dict(dic_vehicle, orient=\"index\")\n df.to_csv(path_to_log_file, na_rep=\"nan\")\n \n path_to_log_file = os.path.join(self.path_to_log, \"inter_{0}.pkl\".format(inter_ind))\n f = open(path_to_log_file, \"wb\")\n pickle.dump(self.list_inter_log[inter_ind], f)\n f.close()\n\n def bulk_log_multi_process(self, batch_size=100):\n assert len(self.list_intersection) == len(self.list_inter_log)\n if batch_size > len(self.list_intersection):\n batch_size_run = len(self.list_intersection)\n else:\n batch_size_run = batch_size\n process_list = []\n for batch in range(0, len(self.list_intersection), 
batch_size_run):\n start = batch\n stop = min(batch + batch_size, len(self.list_intersection))\n p = Process(target=self.batch_log, args=(start, stop))\n print(\"before\")\n p.start()\n print(\"end\")\n process_list.append(p)\n print(\"before join\")\n\n for t in process_list:\n t.join()\n print(\"end join\")\n\n def _adjacency_extraction(self):\n traffic_light_node_dict = {}\n file = os.path.join(self.path_to_work_directory, self.dic_traffic_env_conf[\"ROADNET_FILE\"])\n with open(\"{0}\".format(file)) as json_data:\n net = json.load(json_data)\n for inter in net[\"intersections\"]:\n if not inter[\"virtual\"]:\n traffic_light_node_dict[inter[\"id\"]] = {\"location\": {\"x\": float(inter[\"point\"][\"x\"]),\n \"y\": float(inter[\"point\"][\"y\"])},\n \"total_inter_num\": None, \"adjacency_row\": None,\n \"inter_id_to_index\": None,\n \"neighbor_ENWS\": None}\n\n top_k = self.dic_traffic_env_conf[\"TOP_K_ADJACENCY\"]\n total_inter_num = len(traffic_light_node_dict.keys())\n inter_id_to_index = {}\n\n edge_id_dict = {}\n for road in net[\"roads\"]:\n if road[\"id\"] not in edge_id_dict.keys():\n edge_id_dict[road[\"id\"]] = {}\n edge_id_dict[road[\"id\"]][\"from\"] = road[\"startIntersection\"]\n edge_id_dict[road[\"id\"]][\"to\"] = road[\"endIntersection\"]\n\n index = 0\n for i in traffic_light_node_dict.keys():\n inter_id_to_index[i] = index\n index += 1\n\n for i in traffic_light_node_dict.keys():\n location_1 = traffic_light_node_dict[i][\"location\"]\n\n row = np.array([0]*total_inter_num)\n # row = np.zeros((self.dic_traffic_env_conf[\"NUM_ROW\"],self.dic_traffic_env_conf[\"NUM_col\"]))\n for j in traffic_light_node_dict.keys():\n location_2 = traffic_light_node_dict[j][\"location\"]\n dist = self._cal_distance(location_1, location_2)\n row[inter_id_to_index[j]] = dist\n if len(row) == top_k:\n adjacency_row_unsorted = np.argpartition(row, -1)[:top_k].tolist()\n elif len(row) > top_k:\n adjacency_row_unsorted = np.argpartition(row, top_k)[:top_k].tolist()\n else:\n adjacency_row_unsorted = [k for k in range(total_inter_num)]\n adjacency_row_unsorted.remove(inter_id_to_index[i])\n traffic_light_node_dict[i][\"adjacency_row\"] = [inter_id_to_index[i]]+adjacency_row_unsorted\n traffic_light_node_dict[i][\"total_inter_num\"] = total_inter_num\n\n for i in traffic_light_node_dict.keys():\n traffic_light_node_dict[i][\"total_inter_num\"] = inter_id_to_index\n traffic_light_node_dict[i][\"neighbor_ENWS\"] = []\n for j in range(4):\n road_id = i.replace(\"intersection\", \"road\")+\"_\"+str(j)\n if edge_id_dict[road_id][\"to\"] not in traffic_light_node_dict.keys():\n traffic_light_node_dict[i][\"neighbor_ENWS\"].append(None)\n else:\n traffic_light_node_dict[i][\"neighbor_ENWS\"].append(edge_id_dict[road_id][\"to\"])\n\n return traffic_light_node_dict\n\n @staticmethod\n def _cal_distance(loc_dict1, loc_dict2):\n a = np.array((loc_dict1[\"x\"], loc_dict1[\"y\"]))\n b = np.array((loc_dict2[\"x\"], loc_dict2[\"y\"]))\n return np.sqrt(np.sum((a-b)**2))\n\n @staticmethod\n def end_cityflow():\n print(\"============== cityflow process end ===============\")\n\n def get_lane_length(self):\n \"\"\"\n newly added part for get lane length\n Read the road net file\n Return: dict{lanes} normalized with the min lane length\n \"\"\"\n file = os.path.join(self.path_to_work_directory, self.dic_traffic_env_conf[\"ROADNET_FILE\"])\n with open(file) as json_data:\n net = json.load(json_data)\n roads = net['roads']\n lanes_length_dict = {}\n lane_normalize_factor = {}\n\n for road in roads:\n points = 
road[\"points\"]\n road_length = abs(points[0]['x'] + points[0]['y'] - points[1]['x'] - points[1]['y'])\n for i in range(3):\n lane_id = road['id'] + \"_{0}\".format(i)\n lanes_length_dict[lane_id] = road_length\n min_length = min(lanes_length_dict.values())\n\n for key, value in lanes_length_dict.items():\n lane_normalize_factor[key] = value / min_length\n return lane_normalize_factor, lanes_length_dict"
},
{
"identifier": "path_check",
"path": "utils/pipeline.py",
"snippet": "def path_check(dic_path):\n if os.path.exists(dic_path[\"PATH_TO_WORK_DIRECTORY\"]):\n if dic_path[\"PATH_TO_WORK_DIRECTORY\"] != \"records/default\":\n raise FileExistsError\n else:\n pass\n else:\n os.makedirs(dic_path[\"PATH_TO_WORK_DIRECTORY\"])\n if os.path.exists(dic_path[\"PATH_TO_MODEL\"]):\n if dic_path[\"PATH_TO_MODEL\"] != \"model/default\":\n raise FileExistsError\n else:\n pass\n else:\n os.makedirs(dic_path[\"PATH_TO_MODEL\"])"
},
{
"identifier": "copy_cityflow_file",
"path": "utils/pipeline.py",
"snippet": "def copy_cityflow_file(dic_path, dic_traffic_env_conf, path=None):\n if path is None:\n path = dic_path[\"PATH_TO_WORK_DIRECTORY\"]\n shutil.copy(os.path.join(dic_path[\"PATH_TO_DATA\"], dic_traffic_env_conf[\"TRAFFIC_FILE\"]),\n os.path.join(path, dic_traffic_env_conf[\"TRAFFIC_FILE\"]))\n shutil.copy(os.path.join(dic_path[\"PATH_TO_DATA\"], dic_traffic_env_conf[\"ROADNET_FILE\"]),\n os.path.join(path, dic_traffic_env_conf[\"ROADNET_FILE\"]))"
},
{
"identifier": "copy_conf_file",
"path": "utils/pipeline.py",
"snippet": "def copy_conf_file(dic_path, dic_agent_conf, dic_traffic_env_conf, path=None):\n if path is None:\n path = dic_path[\"PATH_TO_WORK_DIRECTORY\"]\n json.dump(dic_agent_conf, open(os.path.join(path, \"agent.conf\"), \"w\"), indent=4)\n json.dump(dic_traffic_env_conf, open(os.path.join(path, \"traffic_env.conf\"), \"w\"), indent=4)"
}
] | from .config import DIC_AGENTS
from .my_utils import merge, get_state, get_state_detail, eight_phase_list, dump_json
from copy import deepcopy
from .cityflow_env import CityFlowEnv
from .pipeline import path_check, copy_cityflow_file, copy_conf_file
from tqdm import tqdm
import os
import time
import numpy as np
import wandb
import threading | 7,445 | path_check(self.dic_path)
copy_conf_file(self.dic_path, self.dic_agent_conf, self.dic_traffic_env_conf)
copy_cityflow_file(self.dic_path, self.dic_traffic_env_conf)
self.env = CityFlowEnv(
path_to_log=self.dic_path["PATH_TO_WORK_DIRECTORY"],
path_to_work_directory=self.dic_path["PATH_TO_WORK_DIRECTORY"],
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path
)
self.env.reset()
agent_name = self.dic_traffic_env_conf["MODEL_NAME"]
for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
if "ChatGPT" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
GPT_version=self.dic_agent_conf["GPT_VERSION"],
intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
inter_name=self.env.list_intersection[i].inter_name,
phase_num=len(self.env.list_intersection[i].list_phases),
log_dir=self.dic_agent_conf["LOG_DIR"],
dataset=f"{self.roadnet}-{self.trafficflow}"
)
elif "open_llm" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
ex_api=self.dic_agent_conf["WITH_EXTERNAL_API"],
model=agent_name.split("-")[1],
intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
inter_name=self.env.list_intersection[i].inter_name,
phase_num=len(self.env.list_intersection[i].list_phases),
log_dir=self.dic_agent_conf["LOG_DIR"],
dataset=f"{self.roadnet}-{self.trafficflow}"
)
else:
agent = DIC_AGENTS[agent_name](
dic_agent_conf=self.dic_agent_conf,
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path,
cnt_round=0,
intersection_id=str(i)
)
self.agents.append(agent)
def train(self, round):
print("================ start train ================")
total_run_cnt = self.dic_traffic_env_conf["RUN_COUNTS"]
# initialize output streams
file_name_memory = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "memories.txt")
done = False
state = self.env.reset()
total_reward = 0.0
queue_length_episode = []
waiting_time_episode = []
step_num = 0
print("end reset")
current_time = self.env.get_current_time() # in seconds
all_config = merge(merge(self.dic_agent_conf, self.dic_path), self.dic_traffic_env_conf)
logger = wandb.init(
project=self.dic_traffic_env_conf['PROJECT_NAME'],
group=f"{self.dic_traffic_env_conf['MODEL_NAME']}-{self.roadnet}-{self.trafficflow}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases",
name=f"round_{round}",
config=all_config,
)
start_time = time.time()
state_action_log = [[] for _ in range(len(state))]
while not done and current_time < total_run_cnt:
action_list = []
threads = []
for i in range(len(state)):
# log statistic state
intersection = self.env.intersection_dict[self.env.list_intersection[i].inter_name]
roads = deepcopy(intersection["roads"])
statistic_state, statistic_state_incoming, mean_speed = get_state_detail(roads, self.env)
state_action_log[i].append({"state": statistic_state, "state_incoming": statistic_state_incoming, "approaching_speed": mean_speed})
one_state = state[i]
count = step_num
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"] or "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
thread = threading.Thread(target=self.agents[i].choose_action, args=(self.env,))
threads.append(thread)
else:
action = self.agents[i].choose_action(count, one_state)
action_list.append(action)
# multi-thread
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"]:
for thread in threads:
thread.start()
for thread in tqdm(threads):
thread.join()
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
# multi-thread
if "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
started_thread_id = []
thread_num = self.dic_traffic_env_conf["LLM_API_THREAD_NUM"] if not self.dic_agent_conf["WITH_EXTERNAL_API"] else 2
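# cap concurrency at 2 threads when an external LLM API is used, presumably to respect rate limits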
for i, thread in enumerate(tqdm(threads)):
thread.start()
started_thread_id.append(i)
if (i + 1) % thread_num == 0:
for t_id in started_thread_id:
threads[t_id].join()
started_thread_id = []
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
next_state, reward, done, _ = self.env.step(action_list)
# log action
for i in range(len(state)):
|
class OneLine:
def __init__(self, dic_agent_conf, dic_traffic_env_conf, dic_path, roadnet, trafficflow):
self.dic_agent_conf = dic_agent_conf
self.dic_traffic_env_conf = dic_traffic_env_conf
self.dic_path = dic_path
self.agents = []
self.env = None
self.roadnet = roadnet
self.trafficflow = trafficflow
self.models = []
self.initialize()
def initialize(self):
path_check(self.dic_path)
copy_conf_file(self.dic_path, self.dic_agent_conf, self.dic_traffic_env_conf)
copy_cityflow_file(self.dic_path, self.dic_traffic_env_conf)
self.env = CityFlowEnv(
path_to_log=self.dic_path["PATH_TO_WORK_DIRECTORY"],
path_to_work_directory=self.dic_path["PATH_TO_WORK_DIRECTORY"],
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path
)
self.env.reset()
agent_name = self.dic_traffic_env_conf["MODEL_NAME"]
for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
if "ChatGPT" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
GPT_version=self.dic_agent_conf["GPT_VERSION"],
intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
inter_name=self.env.list_intersection[i].inter_name,
phase_num=len(self.env.list_intersection[i].list_phases),
log_dir=self.dic_agent_conf["LOG_DIR"],
dataset=f"{self.roadnet}-{self.trafficflow}"
)
elif "open_llm" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
ex_api=self.dic_agent_conf["WITH_EXTERNAL_API"],
model=agent_name.split("-")[1],
intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
inter_name=self.env.list_intersection[i].inter_name,
phase_num=len(self.env.list_intersection[i].list_phases),
log_dir=self.dic_agent_conf["LOG_DIR"],
dataset=f"{self.roadnet}-{self.trafficflow}"
)
else:
agent = DIC_AGENTS[agent_name](
dic_agent_conf=self.dic_agent_conf,
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path,
cnt_round=0,
intersection_id=str(i)
)
self.agents.append(agent)
def train(self, round):
print("================ start train ================")
total_run_cnt = self.dic_traffic_env_conf["RUN_COUNTS"]
# initialize output streams
file_name_memory = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "memories.txt")
done = False
state = self.env.reset()
total_reward = 0.0
queue_length_episode = []
waiting_time_episode = []
step_num = 0
print("end reset")
current_time = self.env.get_current_time() # in seconds
all_config = merge(merge(self.dic_agent_conf, self.dic_path), self.dic_traffic_env_conf)
logger = wandb.init(
project=self.dic_traffic_env_conf['PROJECT_NAME'],
group=f"{self.dic_traffic_env_conf['MODEL_NAME']}-{self.roadnet}-{self.trafficflow}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases",
name=f"round_{round}",
config=all_config,
)
start_time = time.time()
state_action_log = [[] for _ in range(len(state))]
while not done and current_time < total_run_cnt:
action_list = []
threads = []
for i in range(len(state)):
# log statistic state
intersection = self.env.intersection_dict[self.env.list_intersection[i].inter_name]
roads = deepcopy(intersection["roads"])
statistic_state, statistic_state_incoming, mean_speed = get_state_detail(roads, self.env)
state_action_log[i].append({"state": statistic_state, "state_incoming": statistic_state_incoming, "approaching_speed": mean_speed})
one_state = state[i]
count = step_num
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"] or "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
thread = threading.Thread(target=self.agents[i].choose_action, args=(self.env,))
threads.append(thread)
else:
action = self.agents[i].choose_action(count, one_state)
action_list.append(action)
# multi-thread
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"]:
for thread in threads:
thread.start()
for thread in tqdm(threads):
thread.join()
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
# multi-thread
if "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
started_thread_id = []
thread_num = self.dic_traffic_env_conf["LLM_API_THREAD_NUM"] if not self.dic_agent_conf["WITH_EXTERNAL_API"] else 2
for i, thread in enumerate(tqdm(threads)):
thread.start()
started_thread_id.append(i)
if (i + 1) % thread_num == 0:
for t_id in started_thread_id:
threads[t_id].join()
started_thread_id = []
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
next_state, reward, done, _ = self.env.step(action_list)
# log action
for i in range(len(state)): | state_action_log[i][-1]["action"] = eight_phase_list[action_list[i]] | 1 | 2023-12-26 08:31:47+00:00 | 12k |
KyanChen/TTP | mmdet/models/dense_heads/dino_head.py | [
{
"identifier": "MODELS",
"path": "mmdet/registry.py",
"snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])"
},
{
"identifier": "SampleList",
"path": "mmdet/structures/det_data_sample.py",
"snippet": "class DetDataSample(BaseDataElement):\n def proposals(self) -> InstanceData:\n def proposals(self, value: InstanceData):\n def proposals(self):\n def gt_instances(self) -> InstanceData:\n def gt_instances(self, value: InstanceData):\n def gt_instances(self):\n def pred_instances(self) -> InstanceData:\n def pred_instances(self, value: InstanceData):\n def pred_instances(self):\n def pred_track_instances(self) -> InstanceData:\n def pred_track_instances(self, value: InstanceData):\n def pred_track_instances(self):\n def ignored_instances(self) -> InstanceData:\n def ignored_instances(self, value: InstanceData):\n def ignored_instances(self):\n def gt_panoptic_seg(self) -> PixelData:\n def gt_panoptic_seg(self, value: PixelData):\n def gt_panoptic_seg(self):\n def pred_panoptic_seg(self) -> PixelData:\n def pred_panoptic_seg(self, value: PixelData):\n def pred_panoptic_seg(self):\n def gt_sem_seg(self) -> PixelData:\n def gt_sem_seg(self, value: PixelData):\n def gt_sem_seg(self):\n def pred_sem_seg(self) -> PixelData:\n def pred_sem_seg(self, value: PixelData):\n def pred_sem_seg(self):"
},
{
"identifier": "bbox_overlaps",
"path": "mmdet/structures/bbox/bbox_overlaps.py",
"snippet": "def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):\n \"\"\"Calculate overlap between two set of bboxes.\n\n FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889\n Note:\n Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',\n there are some new generated variable when calculating IOU\n using bbox_overlaps function:\n\n 1) is_aligned is False\n area1: M x 1\n area2: N x 1\n lt: M x N x 2\n rb: M x N x 2\n wh: M x N x 2\n overlap: M x N x 1\n union: M x N x 1\n ious: M x N x 1\n\n Total memory:\n S = (9 x N x M + N + M) * 4 Byte,\n\n When using FP16, we can reduce:\n R = (9 x N x M + N + M) * 4 / 2 Byte\n R large than (N + M) * 4 * 2 is always true when N and M >= 1.\n Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2,\n N + 1 < 3 * N, when N or M is 1.\n\n Given M = 40 (ground truth), N = 400000 (three anchor boxes\n in per grid, FPN, R-CNNs),\n R = 275 MB (one times)\n\n A special case (dense detection), M = 512 (ground truth),\n R = 3516 MB = 3.43 GB\n\n When the batch size is B, reduce:\n B x R\n\n Therefore, CUDA memory runs out frequently.\n\n Experiments on GeForce RTX 2080Ti (11019 MiB):\n\n | dtype | M | N | Use | Real | Ideal |\n |:----:|:----:|:----:|:----:|:----:|:----:|\n | FP32 | 512 | 400000 | 8020 MiB | -- | -- |\n | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |\n | FP32 | 40 | 400000 | 1540 MiB | -- | -- |\n | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |\n\n 2) is_aligned is True\n area1: N x 1\n area2: N x 1\n lt: N x 2\n rb: N x 2\n wh: N x 2\n overlap: N x 1\n union: N x 1\n ious: N x 1\n\n Total memory:\n S = 11 x N * 4 Byte\n\n When using FP16, we can reduce:\n R = 11 x N * 4 / 2 Byte\n\n So do the 'giou' (large than 'iou').\n\n Time-wise, FP16 is generally faster than FP32.\n\n When gpu_assign_thr is not -1, it takes more time on cpu\n but not reduce memory.\n There, we can reduce half the memory and keep the speed.\n\n If ``is_aligned`` is ``False``, then calculate the overlaps between each\n bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned\n pair of bboxes1 and bboxes2.\n\n Args:\n bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.\n bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.\n B indicates the batch dim, in shape (B1, B2, ..., Bn).\n If ``is_aligned`` is ``True``, then m and n must be equal.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection over\n foreground) or \"giou\" (generalized intersection over union).\n Default \"iou\".\n is_aligned (bool, optional): If True, then m and n must be equal.\n Default False.\n eps (float, optional): A value added to the denominator for numerical\n stability. 
Default 1e-6.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n\n Example:\n >>> bboxes1 = torch.FloatTensor([\n >>> [0, 0, 10, 10],\n >>> [10, 10, 20, 20],\n >>> [32, 32, 38, 42],\n >>> ])\n >>> bboxes2 = torch.FloatTensor([\n >>> [0, 0, 10, 20],\n >>> [0, 10, 10, 19],\n >>> [10, 10, 20, 20],\n >>> ])\n >>> overlaps = bbox_overlaps(bboxes1, bboxes2)\n >>> assert overlaps.shape == (3, 3)\n >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)\n >>> assert overlaps.shape == (3, )\n\n Example:\n >>> empty = torch.empty(0, 4)\n >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])\n >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)\n >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)\n >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)\n \"\"\"\n\n assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'\n # Either the boxes are empty or the length of boxes' last dimension is 4\n assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)\n assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)\n\n # Batch dim must be the same\n # Batch dim: (B1, B2, ... Bn)\n assert bboxes1.shape[:-2] == bboxes2.shape[:-2]\n batch_shape = bboxes1.shape[:-2]\n\n rows = bboxes1.size(-2)\n cols = bboxes2.size(-2)\n if is_aligned:\n assert rows == cols\n\n if rows * cols == 0:\n if is_aligned:\n return bboxes1.new(batch_shape + (rows, ))\n else:\n return bboxes1.new(batch_shape + (rows, cols))\n\n area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (\n bboxes1[..., 3] - bboxes1[..., 1])\n area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (\n bboxes2[..., 3] - bboxes2[..., 1])\n\n if is_aligned:\n lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]\n rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]\n\n wh = fp16_clamp(rb - lt, min=0)\n overlap = wh[..., 0] * wh[..., 1]\n\n if mode in ['iou', 'giou']:\n union = area1 + area2 - overlap\n else:\n union = area1\n if mode == 'giou':\n enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])\n enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])\n else:\n lt = torch.max(bboxes1[..., :, None, :2],\n bboxes2[..., None, :, :2]) # [B, rows, cols, 2]\n rb = torch.min(bboxes1[..., :, None, 2:],\n bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]\n\n wh = fp16_clamp(rb - lt, min=0)\n overlap = wh[..., 0] * wh[..., 1]\n\n if mode in ['iou', 'giou']:\n union = area1[..., None] + area2[..., None, :] - overlap\n else:\n union = area1[..., None]\n if mode == 'giou':\n enclosed_lt = torch.min(bboxes1[..., :, None, :2],\n bboxes2[..., None, :, :2])\n enclosed_rb = torch.max(bboxes1[..., :, None, 2:],\n bboxes2[..., None, :, 2:])\n\n eps = union.new_tensor([eps])\n union = torch.max(union, eps)\n ious = overlap / union\n if mode in ['iou', 'iof']:\n return ious\n # calculate gious\n enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)\n enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]\n enclose_area = torch.max(enclose_area, eps)\n gious = ious - (enclose_area - union) / enclose_area\n return gious"
},
{
"identifier": "bbox_cxcywh_to_xyxy",
"path": "mmdet/structures/bbox/transforms.py",
"snippet": "def bbox_cxcywh_to_xyxy(bbox: Tensor) -> Tensor:\n \"\"\"Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).\n\n Args:\n bbox (Tensor): Shape (n, 4) for bboxes.\n\n Returns:\n Tensor: Converted bboxes.\n \"\"\"\n cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)\n bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]\n return torch.cat(bbox_new, dim=-1)"
},
{
"identifier": "bbox_xyxy_to_cxcywh",
"path": "mmdet/structures/bbox/transforms.py",
"snippet": "def bbox_xyxy_to_cxcywh(bbox: Tensor) -> Tensor:\n \"\"\"Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).\n\n Args:\n bbox (Tensor): Shape (n, 4) for bboxes.\n\n Returns:\n Tensor: Converted bboxes.\n \"\"\"\n x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)\n bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)]\n return torch.cat(bbox_new, dim=-1)"
},
{
"identifier": "reduce_mean",
"path": "mmdet/utils/dist_utils.py",
"snippet": "def reduce_mean(tensor):\n \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor"
},
{
"identifier": "InstanceList",
"path": "mmdet/utils/typing_utils.py",
"snippet": ""
},
{
"identifier": "QualityFocalLoss",
"path": "mmdet/models/losses/gfocal_loss.py",
"snippet": "class QualityFocalLoss(nn.Module):\n r\"\"\"Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:\n Learning Qualified and Distributed Bounding Boxes for Dense Object\n Detection <https://arxiv.org/abs/2006.04388>`_.\n\n Args:\n use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.\n Defaults to True.\n beta (float): The beta parameter for calculating the modulating factor.\n Defaults to 2.0.\n reduction (str): Options are \"none\", \"mean\" and \"sum\".\n loss_weight (float): Loss weight of current loss.\n activated (bool, optional): Whether the input is activated.\n If True, it means the input has been activated and can be\n treated as probabilities. Else, it should be treated as logits.\n Defaults to False.\n \"\"\"\n\n def __init__(self,\n use_sigmoid=True,\n beta=2.0,\n reduction='mean',\n loss_weight=1.0,\n activated=False):\n super(QualityFocalLoss, self).__init__()\n assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'\n self.use_sigmoid = use_sigmoid\n self.beta = beta\n self.reduction = reduction\n self.loss_weight = loss_weight\n self.activated = activated\n\n def forward(self,\n pred,\n target,\n weight=None,\n avg_factor=None,\n reduction_override=None):\n \"\"\"Forward function.\n\n Args:\n pred (torch.Tensor): Predicted joint representation of\n classification and quality (IoU) estimation with shape (N, C),\n C is the number of classes.\n target (Union(tuple([torch.Tensor]),Torch.Tensor)): The type is\n tuple, it should be included Target category label with\n shape (N,) and target quality label with shape (N,).The type\n is torch.Tensor, the target should be one-hot form with\n soft weights.\n weight (torch.Tensor, optional): The weight of loss for each\n prediction. Defaults to None.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n reduction_override (str, optional): The reduction method used to\n override the original reduction method of the loss.\n Defaults to None.\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n if self.use_sigmoid:\n if self.activated:\n calculate_loss_func = quality_focal_loss_with_prob\n else:\n calculate_loss_func = quality_focal_loss\n if isinstance(target, torch.Tensor):\n # the target shape with (N,C) or (N,C,...), which means\n # the target is one-hot form with soft weights.\n calculate_loss_func = partial(\n quality_focal_loss_tensor_target, activated=self.activated)\n\n loss_cls = self.loss_weight * calculate_loss_func(\n pred,\n target,\n weight,\n beta=self.beta,\n reduction=reduction,\n avg_factor=avg_factor)\n else:\n raise NotImplementedError\n return loss_cls"
},
{
"identifier": "multi_apply",
"path": "mmdet/models/utils/misc.py",
"snippet": "def multi_apply(func, *args, **kwargs):\n \"\"\"Apply function to a list of arguments.\n\n Note:\n This function applies the ``func`` to multiple inputs and\n map the multiple outputs of the ``func`` into different\n list. Each list contains the same type of outputs corresponding\n to different inputs.\n\n Args:\n func (Function): A function that will be applied to a list of\n arguments\n\n Returns:\n tuple(list): A tuple containing multiple list, each list contains \\\n a kind of returned results by the function\n \"\"\"\n pfunc = partial(func, **kwargs) if kwargs else func\n map_results = map(pfunc, *args)\n return tuple(map(list, zip(*map_results)))"
},
{
"identifier": "DeformableDETRHead",
"path": "mmdet/models/dense_heads/deformable_detr_head.py",
"snippet": "class DeformableDETRHead(DETRHead):\n r\"\"\"Head of DeformDETR: Deformable DETR: Deformable Transformers for\n End-to-End Object Detection.\n\n Code is modified from the `official github repo\n <https://github.com/fundamentalvision/Deformable-DETR>`_.\n\n More details can be found in the `paper\n <https://arxiv.org/abs/2010.04159>`_ .\n\n Args:\n share_pred_layer (bool): Whether to share parameters for all the\n prediction layers. Defaults to `False`.\n num_pred_layer (int): The number of the prediction layers.\n Defaults to 6.\n as_two_stage (bool, optional): Whether to generate the proposal\n from the outputs of encoder. Defaults to `False`.\n \"\"\"\n\n def __init__(self,\n *args,\n share_pred_layer: bool = False,\n num_pred_layer: int = 6,\n as_two_stage: bool = False,\n **kwargs) -> None:\n self.share_pred_layer = share_pred_layer\n self.num_pred_layer = num_pred_layer\n self.as_two_stage = as_two_stage\n\n super().__init__(*args, **kwargs)\n\n def _init_layers(self) -> None:\n \"\"\"Initialize classification branch and regression branch of head.\"\"\"\n fc_cls = Linear(self.embed_dims, self.cls_out_channels)\n reg_branch = []\n for _ in range(self.num_reg_fcs):\n reg_branch.append(Linear(self.embed_dims, self.embed_dims))\n reg_branch.append(nn.ReLU())\n reg_branch.append(Linear(self.embed_dims, 4))\n reg_branch = nn.Sequential(*reg_branch)\n\n if self.share_pred_layer:\n self.cls_branches = nn.ModuleList(\n [fc_cls for _ in range(self.num_pred_layer)])\n self.reg_branches = nn.ModuleList(\n [reg_branch for _ in range(self.num_pred_layer)])\n else:\n self.cls_branches = nn.ModuleList(\n [copy.deepcopy(fc_cls) for _ in range(self.num_pred_layer)])\n self.reg_branches = nn.ModuleList([\n copy.deepcopy(reg_branch) for _ in range(self.num_pred_layer)\n ])\n\n def init_weights(self) -> None:\n \"\"\"Initialize weights of the Deformable DETR head.\"\"\"\n if self.loss_cls.use_sigmoid:\n bias_init = bias_init_with_prob(0.01)\n for m in self.cls_branches:\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias, bias_init)\n for m in self.reg_branches:\n constant_init(m[-1], 0, bias=0)\n nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0)\n if self.as_two_stage:\n for m in self.reg_branches:\n nn.init.constant_(m[-1].bias.data[2:], 0.0)\n\n def forward(self, hidden_states: Tensor,\n references: List[Tensor]) -> Tuple[Tensor, Tensor]:\n \"\"\"Forward function.\n\n Args:\n hidden_states (Tensor): Hidden states output from each decoder\n layer, has shape (num_decoder_layers, bs, num_queries, dim).\n references (list[Tensor]): List of the reference from the decoder.\n The first reference is the `init_reference` (initial) and the\n other num_decoder_layers(6) references are `inter_references`\n (intermediate). The `init_reference` has shape (bs,\n num_queries, 4) when `as_two_stage` of the detector is `True`,\n otherwise (bs, num_queries, 2). Each `inter_reference` has\n shape (bs, num_queries, 4) when `with_box_refine` of the\n detector is `True`, otherwise (bs, num_queries, 2). 
The\n coordinates are arranged as (cx, cy) when the last dimension is\n 2, and (cx, cy, w, h) when it is 4.\n\n Returns:\n tuple[Tensor]: results of head containing the following tensor.\n\n - all_layers_outputs_classes (Tensor): Outputs from the\n classification head, has shape (num_decoder_layers, bs,\n num_queries, cls_out_channels).\n - all_layers_outputs_coords (Tensor): Sigmoid outputs from the\n regression head with normalized coordinate format (cx, cy, w,\n h), has shape (num_decoder_layers, bs, num_queries, 4) with the\n last dimension arranged as (cx, cy, w, h).\n \"\"\"\n all_layers_outputs_classes = []\n all_layers_outputs_coords = []\n\n for layer_id in range(hidden_states.shape[0]):\n reference = inverse_sigmoid(references[layer_id])\n # NOTE The last reference will not be used.\n hidden_state = hidden_states[layer_id]\n outputs_class = self.cls_branches[layer_id](hidden_state)\n tmp_reg_preds = self.reg_branches[layer_id](hidden_state)\n if reference.shape[-1] == 4:\n # When `layer` is 0 and `as_two_stage` of the detector\n # is `True`, or when `layer` is greater than 0 and\n # `with_box_refine` of the detector is `True`.\n tmp_reg_preds += reference\n else:\n # When `layer` is 0 and `as_two_stage` of the detector\n # is `False`, or when `layer` is greater than 0 and\n # `with_box_refine` of the detector is `False`.\n assert reference.shape[-1] == 2\n tmp_reg_preds[..., :2] += reference\n outputs_coord = tmp_reg_preds.sigmoid()\n all_layers_outputs_classes.append(outputs_class)\n all_layers_outputs_coords.append(outputs_coord)\n\n all_layers_outputs_classes = torch.stack(all_layers_outputs_classes)\n all_layers_outputs_coords = torch.stack(all_layers_outputs_coords)\n\n return all_layers_outputs_classes, all_layers_outputs_coords\n\n def loss(self, hidden_states: Tensor, references: List[Tensor],\n enc_outputs_class: Tensor, enc_outputs_coord: Tensor,\n batch_data_samples: SampleList) -> dict:\n \"\"\"Perform forward propagation and loss calculation of the detection\n head on the queries of the upstream network.\n\n Args:\n hidden_states (Tensor): Hidden states output from each decoder\n layer, has shape (num_decoder_layers, num_queries, bs, dim).\n references (list[Tensor]): List of the reference from the decoder.\n The first reference is the `init_reference` (initial) and the\n other num_decoder_layers(6) references are `inter_references`\n (intermediate). The `init_reference` has shape (bs,\n num_queries, 4) when `as_two_stage` of the detector is `True`,\n otherwise (bs, num_queries, 2). Each `inter_reference` has\n shape (bs, num_queries, 4) when `with_box_refine` of the\n detector is `True`, otherwise (bs, num_queries, 2). The\n coordinates are arranged as (cx, cy) when the last dimension is\n 2, and (cx, cy, w, h) when it is 4.\n enc_outputs_class (Tensor): The score of each point on encode\n feature map, has shape (bs, num_feat_points, cls_out_channels).\n Only when `as_two_stage` is `True` it would be passed in,\n otherwise it would be `None`.\n enc_outputs_coord (Tensor): The proposal generate from the encode\n feature map, has shape (bs, num_feat_points, 4) with the last\n dimension arranged as (cx, cy, w, h). Only when `as_two_stage`\n is `True` it would be passed in, otherwise it would be `None`.\n batch_data_samples (list[:obj:`DetDataSample`]): The Data\n Samples. 
It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n Returns:\n dict: A dictionary of loss components.\n \"\"\"\n batch_gt_instances = []\n batch_img_metas = []\n for data_sample in batch_data_samples:\n batch_img_metas.append(data_sample.metainfo)\n batch_gt_instances.append(data_sample.gt_instances)\n\n outs = self(hidden_states, references)\n loss_inputs = outs + (enc_outputs_class, enc_outputs_coord,\n batch_gt_instances, batch_img_metas)\n losses = self.loss_by_feat(*loss_inputs)\n return losses\n\n def loss_by_feat(\n self,\n all_layers_cls_scores: Tensor,\n all_layers_bbox_preds: Tensor,\n enc_cls_scores: Tensor,\n enc_bbox_preds: Tensor,\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None\n ) -> Dict[str, Tensor]:\n \"\"\"Loss function.\n\n Args:\n all_layers_cls_scores (Tensor): Classification scores of all\n decoder layers, has shape (num_decoder_layers, bs, num_queries,\n cls_out_channels).\n all_layers_bbox_preds (Tensor): Regression outputs of all decoder\n layers. Each is a 4D-tensor with normalized coordinate format\n (cx, cy, w, h) and has shape (num_decoder_layers, bs,\n num_queries, 4) with the last dimension arranged as\n (cx, cy, w, h).\n enc_cls_scores (Tensor): The score of each point on encode\n feature map, has shape (bs, num_feat_points, cls_out_channels).\n Only when `as_two_stage` is `True` it would be passes in,\n otherwise, it would be `None`.\n enc_bbox_preds (Tensor): The proposal generate from the encode\n feature map, has shape (bs, num_feat_points, 4) with the last\n dimension arranged as (cx, cy, w, h). Only when `as_two_stage`\n is `True` it would be passed in, otherwise it would be `None`.\n batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n batch_img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n loss_dict = super().loss_by_feat(all_layers_cls_scores,\n all_layers_bbox_preds,\n batch_gt_instances, batch_img_metas,\n batch_gt_instances_ignore)\n\n # loss of proposal generated from encode feature map.\n if enc_cls_scores is not None:\n proposal_gt_instances = copy.deepcopy(batch_gt_instances)\n for i in range(len(proposal_gt_instances)):\n proposal_gt_instances[i].labels = torch.zeros_like(\n proposal_gt_instances[i].labels)\n enc_loss_cls, enc_losses_bbox, enc_losses_iou = \\\n self.loss_by_feat_single(\n enc_cls_scores, enc_bbox_preds,\n batch_gt_instances=proposal_gt_instances,\n batch_img_metas=batch_img_metas)\n loss_dict['enc_loss_cls'] = enc_loss_cls\n loss_dict['enc_loss_bbox'] = enc_losses_bbox\n loss_dict['enc_loss_iou'] = enc_losses_iou\n return loss_dict\n\n def predict(self,\n hidden_states: Tensor,\n references: List[Tensor],\n batch_data_samples: SampleList,\n rescale: bool = True) -> InstanceList:\n \"\"\"Perform forward propagation and loss calculation of the detection\n head on the queries of the upstream network.\n\n Args:\n hidden_states (Tensor): Hidden states output from each decoder\n layer, has shape (num_decoder_layers, num_queries, bs, dim).\n references (list[Tensor]): List of the reference from the decoder.\n The first reference is the `init_reference` (initial) and the\n other num_decoder_layers(6) references are `inter_references`\n (intermediate). The `init_reference` has shape (bs,\n num_queries, 4) when `as_two_stage` of the detector is `True`,\n otherwise (bs, num_queries, 2). Each `inter_reference` has\n shape (bs, num_queries, 4) when `with_box_refine` of the\n detector is `True`, otherwise (bs, num_queries, 2). The\n coordinates are arranged as (cx, cy) when the last dimension is\n 2, and (cx, cy, w, h) when it is 4.\n batch_data_samples (list[:obj:`DetDataSample`]): The Data\n Samples. It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n rescale (bool, optional): If `True`, return boxes in original\n image space. Defaults to `True`.\n\n Returns:\n list[obj:`InstanceData`]: Detection results of each image\n after the post process.\n \"\"\"\n batch_img_metas = [\n data_samples.metainfo for data_samples in batch_data_samples\n ]\n\n outs = self(hidden_states, references)\n\n predictions = self.predict_by_feat(\n *outs, batch_img_metas=batch_img_metas, rescale=rescale)\n return predictions\n\n def predict_by_feat(self,\n all_layers_cls_scores: Tensor,\n all_layers_bbox_preds: Tensor,\n batch_img_metas: List[Dict],\n rescale: bool = False) -> InstanceList:\n \"\"\"Transform a batch of output features extracted from the head into\n bbox results.\n\n Args:\n all_layers_cls_scores (Tensor): Classification scores of all\n decoder layers, has shape (num_decoder_layers, bs, num_queries,\n cls_out_channels).\n all_layers_bbox_preds (Tensor): Regression outputs of all decoder\n layers. Each is a 4D-tensor with normalized coordinate format\n (cx, cy, w, h) and shape (num_decoder_layers, bs, num_queries,\n 4) with the last dimension arranged as (cx, cy, w, h).\n batch_img_metas (list[dict]): Meta information of each image.\n rescale (bool, optional): If `True`, return boxes in original\n image space. 
Default `False`.\n\n Returns:\n list[obj:`InstanceData`]: Detection results of each image\n after the post process.\n \"\"\"\n cls_scores = all_layers_cls_scores[-1]\n bbox_preds = all_layers_bbox_preds[-1]\n\n result_list = []\n for img_id in range(len(batch_img_metas)):\n cls_score = cls_scores[img_id]\n bbox_pred = bbox_preds[img_id]\n img_meta = batch_img_metas[img_id]\n results = self._predict_by_feat_single(cls_score, bbox_pred,\n img_meta, rescale)\n result_list.append(results)\n return result_list"
}
] | from typing import Dict, List, Tuple
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps,
bbox_xyxy_to_cxcywh)
from mmdet.utils import InstanceList, OptInstanceList, reduce_mean
from ..losses import QualityFocalLoss
from ..utils import multi_apply
from .deformable_detr_head import DeformableDETRHead
import torch | 10,501 | # between DINO and DeformableDETR.
        # loss of the proposals generated from the encoded feature map.
if enc_cls_scores is not None:
# NOTE The enc_loss calculation of the DINO is
# different from that of Deformable DETR.
enc_loss_cls, enc_losses_bbox, enc_losses_iou = \
self.loss_by_feat_single(
enc_cls_scores, enc_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas)
loss_dict['enc_loss_cls'] = enc_loss_cls
loss_dict['enc_loss_bbox'] = enc_losses_bbox
loss_dict['enc_loss_iou'] = enc_losses_iou
if all_layers_denoising_cls_scores is not None:
# calculate denoising loss from all decoder layers
dn_losses_cls, dn_losses_bbox, dn_losses_iou = self.loss_dn(
all_layers_denoising_cls_scores,
all_layers_denoising_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
dn_meta=dn_meta)
# collate denoising loss
loss_dict['dn_loss_cls'] = dn_losses_cls[-1]
loss_dict['dn_loss_bbox'] = dn_losses_bbox[-1]
loss_dict['dn_loss_iou'] = dn_losses_iou[-1]
for num_dec_layer, (loss_cls_i, loss_bbox_i, loss_iou_i) in \
enumerate(zip(dn_losses_cls[:-1], dn_losses_bbox[:-1],
dn_losses_iou[:-1])):
loss_dict[f'd{num_dec_layer}.dn_loss_cls'] = loss_cls_i
loss_dict[f'd{num_dec_layer}.dn_loss_bbox'] = loss_bbox_i
loss_dict[f'd{num_dec_layer}.dn_loss_iou'] = loss_iou_i
return loss_dict
def loss_dn(self, all_layers_denoising_cls_scores: Tensor,
all_layers_denoising_bbox_preds: Tensor,
batch_gt_instances: InstanceList, batch_img_metas: List[dict],
dn_meta: Dict[str, int]) -> Tuple[List[Tensor]]:
"""Calculate denoising loss.
Args:
all_layers_denoising_cls_scores (Tensor): Classification scores of
all decoder layers in denoising part, has shape (
num_decoder_layers, bs, num_denoising_queries,
cls_out_channels).
all_layers_denoising_bbox_preds (Tensor): Regression outputs of all
decoder layers in denoising part. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and has shape
(num_decoder_layers, bs, num_denoising_queries, 4).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
                'num_denoising_groups'. It will be used to split the outputs of
                the denoising and matching parts and for loss calculation.
Returns:
Tuple[List[Tensor]]: The loss_dn_cls, loss_dn_bbox, and loss_dn_iou
                of each decoder layer.
"""
return multi_apply(
self._loss_dn_single,
all_layers_denoising_cls_scores,
all_layers_denoising_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
dn_meta=dn_meta)
def _loss_dn_single(self, dn_cls_scores: Tensor, dn_bbox_preds: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
dn_meta: Dict[str, int]) -> Tuple[Tensor]:
"""Denoising loss for outputs from a single decoder layer.
Args:
dn_cls_scores (Tensor): Classification scores of a single decoder
layer in denoising part, has shape (bs, num_denoising_queries,
cls_out_channels).
dn_bbox_preds (Tensor): Regression outputs of a single decoder
layer in denoising part. Each is a 4D-tensor with normalized
coordinate format (cx, cy, w, h) and has shape
(bs, num_denoising_queries, 4).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
                'num_denoising_groups'. It will be used to split the outputs of
                the denoising and matching parts and for loss calculation.
Returns:
Tuple[Tensor]: A tuple including `loss_cls`, `loss_box` and
`loss_iou`.
"""
cls_reg_targets = self.get_dn_targets(batch_gt_instances,
batch_img_metas, dn_meta)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
labels = torch.cat(labels_list, 0)
label_weights = torch.cat(label_weights_list, 0)
bbox_targets = torch.cat(bbox_targets_list, 0)
bbox_weights = torch.cat(bbox_weights_list, 0)
# classification loss
cls_scores = dn_cls_scores.reshape(-1, self.cls_out_channels)
# construct weighted avg_factor to match with the official DETR repo
cls_avg_factor = \
num_total_pos * 1.0 + num_total_neg * self.bg_cls_weight
if self.sync_cls_avg_factor:
cls_avg_factor = reduce_mean(
cls_scores.new_tensor([cls_avg_factor]))
cls_avg_factor = max(cls_avg_factor, 1)
if len(cls_scores) > 0:
| # Copyright (c) OpenMMLab. All rights reserved.
@MODELS.register_module()
class DINOHead(DeformableDETRHead):
r"""Head of the DINO: DETR with Improved DeNoising Anchor Boxes
for End-to-End Object Detection
Code is modified from the `official github repo
<https://github.com/IDEA-Research/DINO>`_.
More details can be found in the `paper
<https://arxiv.org/abs/2203.03605>`_ .
"""
def loss(self, hidden_states: Tensor, references: List[Tensor],
enc_outputs_class: Tensor, enc_outputs_coord: Tensor,
batch_data_samples: SampleList, dn_meta: Dict[str, int]) -> dict:
"""Perform forward propagation and loss calculation of the detection
head on the queries of the upstream network.
Args:
hidden_states (Tensor): Hidden states output from each decoder
layer, has shape (num_decoder_layers, bs, num_queries_total,
dim), where `num_queries_total` is the sum of
`num_denoising_queries` and `num_matching_queries` when
`self.training` is `True`, else `num_matching_queries`.
references (list[Tensor]): List of the reference from the decoder.
The first reference is the `init_reference` (initial) and the
other num_decoder_layers(6) references are `inter_references`
(intermediate). The `init_reference` has shape (bs,
num_queries_total, 4) and each `inter_reference` has shape
(bs, num_queries, 4) with the last dimension arranged as
(cx, cy, w, h).
            enc_outputs_class (Tensor): The score of each point on the encoded
                feature map, has shape (bs, num_feat_points, cls_out_channels).
            enc_outputs_coord (Tensor): The proposals generated from the
                encoded feature map, has shape (bs, num_feat_points, 4) with the
                last dimension arranged as (cx, cy, w, h).
batch_data_samples (list[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
                'num_denoising_groups'. It will be used to split the outputs of
                the denoising and matching parts and for loss calculation.
Returns:
dict: A dictionary of loss components.
"""
batch_gt_instances = []
batch_img_metas = []
for data_sample in batch_data_samples:
batch_img_metas.append(data_sample.metainfo)
batch_gt_instances.append(data_sample.gt_instances)
outs = self(hidden_states, references)
loss_inputs = outs + (enc_outputs_class, enc_outputs_coord,
batch_gt_instances, batch_img_metas, dn_meta)
losses = self.loss_by_feat(*loss_inputs)
return losses
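    # Editor's note: the sketch below is illustrative only and is not part of
    # the original mmdet file. It shows a hypothetical call site inside a
    # DINO-style detector, assuming the decoder produces `hidden_states` and
    # `references` and the denoising query generator produces `dn_meta` with
    # the keys 'num_denoising_queries' and 'num_denoising_groups':
    #
    #     losses = self.bbox_head.loss(
    #         hidden_states,        # (num_decoder_layers, bs, num_queries_total, dim)
    #         references,           # list of (bs, num_queries_total, 4) references
    #         enc_outputs_class,    # (bs, num_feat_points, cls_out_channels)
    #         enc_outputs_coord,    # (bs, num_feat_points, 4), (cx, cy, w, h)
    #         batch_data_samples,   # list[DetDataSample]
    #         dn_meta)              # denoising bookkeeping dict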
def loss_by_feat(
self,
all_layers_cls_scores: Tensor,
all_layers_bbox_preds: Tensor,
enc_cls_scores: Tensor,
enc_bbox_preds: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
dn_meta: Dict[str, int],
batch_gt_instances_ignore: OptInstanceList = None
) -> Dict[str, Tensor]:
"""Loss function.
Args:
all_layers_cls_scores (Tensor): Classification scores of all
decoder layers, has shape (num_decoder_layers, bs,
num_queries_total, cls_out_channels), where
`num_queries_total` is the sum of `num_denoising_queries`
and `num_matching_queries`.
all_layers_bbox_preds (Tensor): Regression outputs of all decoder
layers. Each is a 4D-tensor with normalized coordinate format
(cx, cy, w, h) and has shape (num_decoder_layers, bs,
num_queries_total, 4).
            enc_cls_scores (Tensor): The score of each point on the encoded
                feature map, has shape (bs, num_feat_points, cls_out_channels).
            enc_bbox_preds (Tensor): The proposals generated from the encoded
                feature map, has shape (bs, num_feat_points, 4) with the last
                dimension arranged as (cx, cy, w, h).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
                'num_denoising_groups'. It will be used to split the outputs of
                the denoising and matching parts and for loss calculation.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# extract denoising and matching part of outputs
(all_layers_matching_cls_scores, all_layers_matching_bbox_preds,
all_layers_denoising_cls_scores, all_layers_denoising_bbox_preds) = \
self.split_outputs(
all_layers_cls_scores, all_layers_bbox_preds, dn_meta)
loss_dict = super(DeformableDETRHead, self).loss_by_feat(
all_layers_matching_cls_scores, all_layers_matching_bbox_preds,
batch_gt_instances, batch_img_metas, batch_gt_instances_ignore)
# NOTE DETRHead.loss_by_feat but not DeformableDETRHead.loss_by_feat
# is called, because the encoder loss calculations are different
# between DINO and DeformableDETR.
        # loss of the proposals generated from the encoded feature map.
if enc_cls_scores is not None:
# NOTE The enc_loss calculation of the DINO is
# different from that of Deformable DETR.
enc_loss_cls, enc_losses_bbox, enc_losses_iou = \
self.loss_by_feat_single(
enc_cls_scores, enc_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas)
loss_dict['enc_loss_cls'] = enc_loss_cls
loss_dict['enc_loss_bbox'] = enc_losses_bbox
loss_dict['enc_loss_iou'] = enc_losses_iou
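            # Editor's note (clarifying the NOTE above, based on the
            # DeformableDETRHead snippet included in the context): Deformable
            # DETR deep-copies the gt instances and zeroes their labels before
            # computing the encoder proposal loss, so its proposals are
            # supervised class-agnostically; DINO passes `batch_gt_instances`
            # unchanged, so its encoder proposals are supervised with the real
            # class labels.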
if all_layers_denoising_cls_scores is not None:
# calculate denoising loss from all decoder layers
dn_losses_cls, dn_losses_bbox, dn_losses_iou = self.loss_dn(
all_layers_denoising_cls_scores,
all_layers_denoising_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
dn_meta=dn_meta)
# collate denoising loss
loss_dict['dn_loss_cls'] = dn_losses_cls[-1]
loss_dict['dn_loss_bbox'] = dn_losses_bbox[-1]
loss_dict['dn_loss_iou'] = dn_losses_iou[-1]
for num_dec_layer, (loss_cls_i, loss_bbox_i, loss_iou_i) in \
enumerate(zip(dn_losses_cls[:-1], dn_losses_bbox[:-1],
dn_losses_iou[:-1])):
loss_dict[f'd{num_dec_layer}.dn_loss_cls'] = loss_cls_i
loss_dict[f'd{num_dec_layer}.dn_loss_bbox'] = loss_bbox_i
loss_dict[f'd{num_dec_layer}.dn_loss_iou'] = loss_iou_i
return loss_dict
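    # Editor's note: a minimal, illustrative sketch of the denoising/matching
    # split performed by `self.split_outputs` at the top of `loss_by_feat`.
    # It assumes the mmdet convention that denoising queries come first along
    # the query dimension and that their count is
    # dn_meta['num_denoising_queries']:
    #
    #     num_dn = dn_meta['num_denoising_queries']
    #     dn_cls, matching_cls = (all_layers_cls_scores[:, :, :num_dn, :],
    #                             all_layers_cls_scores[:, :, num_dn:, :])
    #     dn_bbox, matching_bbox = (all_layers_bbox_preds[:, :, :num_dn, :],
    #                               all_layers_bbox_preds[:, :, num_dn:, :])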
def loss_dn(self, all_layers_denoising_cls_scores: Tensor,
all_layers_denoising_bbox_preds: Tensor,
batch_gt_instances: InstanceList, batch_img_metas: List[dict],
dn_meta: Dict[str, int]) -> Tuple[List[Tensor]]:
"""Calculate denoising loss.
Args:
all_layers_denoising_cls_scores (Tensor): Classification scores of
all decoder layers in denoising part, has shape (
num_decoder_layers, bs, num_denoising_queries,
cls_out_channels).
all_layers_denoising_bbox_preds (Tensor): Regression outputs of all
decoder layers in denoising part. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and has shape
(num_decoder_layers, bs, num_denoising_queries, 4).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
                'num_denoising_groups'. It will be used to split the outputs of
                the denoising and matching parts and for loss calculation.
Returns:
Tuple[List[Tensor]]: The loss_dn_cls, loss_dn_bbox, and loss_dn_iou
                of each decoder layer.
"""
return multi_apply(
self._loss_dn_single,
all_layers_denoising_cls_scores,
all_layers_denoising_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
dn_meta=dn_meta)
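    # Editor's note: per the `multi_apply` snippet in the context above, the
    # call in `loss_dn` is roughly equivalent to the following illustrative
    # expansion over the decoder-layer dimension:
    #
    #     per_layer = [
    #         self._loss_dn_single(cls_scores, bbox_preds,
    #                              batch_gt_instances=batch_gt_instances,
    #                              batch_img_metas=batch_img_metas,
    #                              dn_meta=dn_meta)
    #         for cls_scores, bbox_preds in zip(
    #             all_layers_denoising_cls_scores,
    #             all_layers_denoising_bbox_preds)]
    #     dn_losses_cls, dn_losses_bbox, dn_losses_iou = (
    #         [x[0] for x in per_layer], [x[1] for x in per_layer],
    #         [x[2] for x in per_layer])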
def _loss_dn_single(self, dn_cls_scores: Tensor, dn_bbox_preds: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
dn_meta: Dict[str, int]) -> Tuple[Tensor]:
"""Denoising loss for outputs from a single decoder layer.
Args:
dn_cls_scores (Tensor): Classification scores of a single decoder
layer in denoising part, has shape (bs, num_denoising_queries,
cls_out_channels).
dn_bbox_preds (Tensor): Regression outputs of a single decoder
layer in denoising part. Each is a 4D-tensor with normalized
coordinate format (cx, cy, w, h) and has shape
(bs, num_denoising_queries, 4).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
                'num_denoising_groups'. It will be used to split the outputs of
                the denoising and matching parts and for loss calculation.
Returns:
Tuple[Tensor]: A tuple including `loss_cls`, `loss_box` and
`loss_iou`.
"""
cls_reg_targets = self.get_dn_targets(batch_gt_instances,
batch_img_metas, dn_meta)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
labels = torch.cat(labels_list, 0)
label_weights = torch.cat(label_weights_list, 0)
bbox_targets = torch.cat(bbox_targets_list, 0)
bbox_weights = torch.cat(bbox_weights_list, 0)
# classification loss
cls_scores = dn_cls_scores.reshape(-1, self.cls_out_channels)
# construct weighted avg_factor to match with the official DETR repo
cls_avg_factor = \
num_total_pos * 1.0 + num_total_neg * self.bg_cls_weight
if self.sync_cls_avg_factor:
cls_avg_factor = reduce_mean(
cls_scores.new_tensor([cls_avg_factor]))
cls_avg_factor = max(cls_avg_factor, 1)
if len(cls_scores) > 0: | if isinstance(self.loss_cls, QualityFocalLoss): | 7 | 2023-12-23 08:36:47+00:00 | 12k |
SkierProjects/MultiLabelImageClassificationPytorch | src/utils/training/train_model.py | [
{
"identifier": "LoggerFactory",
"path": "src/utils/logging/loggerfactory.py",
"snippet": "class LoggerFactory:\n DEFAULT_LOG_LEVEL = logging.INFO\n LOG_FILE_MAX_BYTES = 10 * 1024 * 1024 # 10 MB\n LOG_FILE_BACKUP_COUNT = 5 # Keep 5 backup files\n LONG_LOG_FORMAT = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n SHORT_LOG_FORMAT = \"%(levelname)s: %(message)s\"\n DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\n @staticmethod\n def setup_logging(loggername, log_file=None, level=None, config=config):\n \"\"\"\n Set up logging configuration for a logger with the specified name.\n\n Parameters:\n logger_name (str): The name of the logger to set up.\n log_file (str): The path to the log file. If None, logs to stdout.\n level (int): The logging level. If None, defaults to the level specified in config.\n config (module): The configuration module with a 'log_level' attribute.\n\n Returns:\n logging.Logger: Configured logger instance.\n \"\"\"\n if level is None:\n level = getattr(logging, config.log_level, LoggerFactory.DEFAULT_LOG_LEVEL)\n \n # Since we are setting up handlers individually, we don't use basicConfig\n logger = logging.getLogger(loggername)\n logger.setLevel(level)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logging.Formatter(LoggerFactory.SHORT_LOG_FORMAT))\n logger.addHandler(console_handler)\n\n if log_file is not None:\n os.makedirs(os.path.dirname(log_file), exist_ok=True)\n file_handler = logging.handlers.RotatingFileHandler(\n log_file, maxBytes=LoggerFactory.LOG_FILE_MAX_BYTES, backupCount=LoggerFactory.LOG_FILE_BACKUP_COUNT)\n file_handler.setFormatter(logging.Formatter(LoggerFactory.LONG_LOG_FORMAT, LoggerFactory.DATE_FORMAT))\n logger.addHandler(file_handler)\n\n return logger\n\n @staticmethod\n def get_logger(name):\n \"\"\"\n Get a logger with the specified name.\n\n Parameters:\n name (str): The name of the logger to retrieve.\n\n Returns:\n logging.Logger: The logger instance with the given name.\n \"\"\"\n return logging.getLogger(name)"
},
{
"identifier": "ModelTrainer",
"path": "src/utils/training/modeltrainer.py",
"snippet": "class ModelTrainer():\n def __init__(self, device, trainloader, validloader, testloader, config=config):\n \"\"\"\n Initializes the ModelTrainer with the given datasets, device, and configuration.\n\n Parameters:\n device (torch.device): The device on which to train the model.\n trainloader (DataLoader): DataLoader for the training dataset.\n validloader (DataLoader): DataLoader for the validation dataset.\n testloader (DataLoader): DataLoader for the test dataset.\n config (module): Configuration module with necessary attributes.\n \"\"\"\n self.config = config\n self.device = device\n self.trainloader = trainloader\n self.validloader = validloader\n self.testloader = testloader\n self.model = modelfactory.create_model(self.config).to(device)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.config.learning_rate)\n\n # Compute label frequencies and create weights for the loss function\n #self.label_freqs = self.compute_label_frequencies()\n #self.pos_weight = self.compute_loss_weights(self.label_freqs).to(device)\n self.criterion = nn.BCEWithLogitsLoss()#pos_weight=self.pos_weight)\n self.epochs = self.config.num_epochs\n self.lr_scheduler = modelutils.get_learningRate_scheduler(self.optimizer, config)\n self.last_train_loss = 10000\n self.last_valid_loss = 10000\n self.last_valid_f1 = 0\n self.current_lr = self.config.learning_rate\n # Initialize TensorBoard writer\n self.tensorBoardWriter = TensorBoardWriter(config)\n\n modelToLoadPath = pathutils.get_model_to_load_path(self.config)\n if self.config.continue_training and os.path.exists(modelToLoadPath):\n logger.info(\"Loading the best model...\") \n if self.config.embedding_layer_enabled or self.config.gcn_enabled and self.config.model_to_load_raw_weights != \"\":\n self.model, modelData = modelloadingutils.load_pretrained_weights_exclude_classifier(self.model, self.config, False)\n modelData[\"f1_score\"] = 0.0\n else:\n modelData = modelloadingutils.load_model(modelToLoadPath, self.config)\n self.model.load_state_dict(modelData['model_state_dict'])\n self.optimizer.load_state_dict(modelData['optimizer_state_dict'])\n\n self.best_f1_score = modelData[\"f1_score\"]\n self.start_epoch = modelData[\"epoch\"] + 1\n self.epochs = self.epochs + self.start_epoch\n self.best_model_state = {\n 'epoch': modelData[\"epoch\"],\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'loss': self.criterion,\n 'f1_score': self.best_f1_score,\n 'model_name': self.config.model_name,\n 'image_size': self.config.image_size,\n 'requires_grad': self.config.model_requires_grad,\n 'num_classes': self.config.num_classes,\n 'dropout': self.config.model_dropout_prob,\n 'embedding_layer': self.config.embedding_layer_enabled,\n 'gcn_enabled': self.config.gcn_enabled,\n 'batch_size': self.config.batch_size,\n 'optimizer': 'Adam',\n 'loss_function': 'BCEWithLogitsLoss'\n }\n else:\n self.best_f1_score = 0.0\n self.start_epoch = 0\n self.best_model_state = None\n self.current_epoch = self.start_epoch - 1\n self.best_f1_score_at_last_reset = 0\n self.patience_counter = 0\n self.patience = self.config.early_stopping_patience\n \n def __enter__(self):\n \"\"\"\n Enter the runtime context for the ModelTrainer object.\n Allows the ModelTrainer to be used with the 'with' statement, ensuring resources are managed properly.\n\n Returns:\n ModelTrainer: The instance with which the context was entered.\n \"\"\"\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Exit the 
runtime context for the ModelTrainer object.\n This method is called after the 'with' block is executed, and it ensures that the TensorBoard writer is closed.\n\n Parameters:\n exc_type: Exception type, if any exception was raised within the 'with' block.\n exc_value: Exception value, the exception instance raised.\n traceback: Traceback object with details of where the exception occurred.\n \"\"\"\n self.tensorBoardWriter.close_writer()\n del self.model\n del self.optimizer\n torch.cuda.empty_cache()\n gc.collect()\n \n def train(self):\n \"\"\"\n Train the model for one epoch using the provided training dataset.\n :return: The average training loss for the epoch.\n \"\"\"\n self.current_epoch += 1\n logger.info('Training')\n self.model.train()\n train_running_loss = 0.0\n for data in tqdm(self.trainloader, total=len(self.trainloader)):\n images, targets = data['image'].to(self.device), data['label'].to(self.device).float()\n self.optimizer.zero_grad()\n\n if (self.config.embedding_layer_enabled or self.config.gcn_enabled):\n label_dropout_rate = 0.3\n use_labels = random.random() > label_dropout_rate\n if use_labels:\n outputs = self.model(images, targets)\n else:\n outputs = self.model(images)\n else:\n outputs = self.model(images)\n loss = self.criterion(outputs, targets)\n train_running_loss += loss.item()\n loss.backward()\n self.optimizer.step()\n \n train_loss = train_running_loss / len(self.trainloader.dataset)\n self.last_train_loss = train_loss\n return train_loss\n \n def validate(self, modelEvaluator, threshold=None):\n \"\"\"\n Validate the model on the validation dataset using a model evaluator.\n\n Parameters:\n modelEvaluator: An instance of the model evaluator class with an 'evaluate' method.\n threshold (Optional[float]): Threshold value for converting probabilities to class labels.\n\n Returns:\n tuple: A tuple containing the average validation loss and the micro-averaged F1 score.\n \"\"\"\n logger.info(\"Validating\")\n valid_loss, valid_f1, _, _ = modelEvaluator.evaluate(self.validloader, self.current_epoch, \"Validation\", threshold=threshold)\n self.last_valid_loss = valid_loss\n self.last_valid_f1 = valid_f1\n self.log_train_validation_results()\n return valid_loss, valid_f1\n \n def learningRateScheduler_check(self):\n \"\"\"\n Check and update the learning rate based on the validation loss. 
Log the updated learning rate to TensorBoard.\n \"\"\"\n self.lr_scheduler.step(self.last_valid_loss)\n self.current_lr = self.optimizer.param_groups[0]['lr']\n self.tensorBoardWriter.add_scalar('Learning Rate', self.current_lr, self.current_epoch)\n\n def log_train_validation_results(self):\n \"\"\"\n Log training and validation results to the logger and TensorBoard.\n Includes the train loss, validation loss, and validation F1 score for the current epoch.\n \"\"\"\n logger.info(f\"Train Loss: {self.last_train_loss:.4f}\")\n logger.info(f'Validation Loss: {self.last_valid_loss:.4f}')\n logger.info(f'Validation F1 Score: {self.last_valid_f1:.4f}')\n \n self.tensorBoardWriter.add_scalar('Loss/Train', self.last_train_loss, self.current_epoch)\n self.tensorBoardWriter.add_scalar('Loss/Validation', self.last_valid_loss, self.current_epoch)\n self.tensorBoardWriter.add_scalar('F1/Validation', self.last_valid_f1, self.current_epoch)\n\n def log_hparam_results(self, test_loss, test_f1):\n \"\"\"\n Log the hyperparameters and test metrics to TensorBoard.\n This method is used for visualizing the relationship between hyperparameters and the model's performance.\n\n Parameters:\n test_loss (float): The loss on the test dataset.\n test_f1 (float): The F1 score on the test dataset.\n \"\"\"\n hparams = metricutils.filter_dict_for_hparams(self.best_model_state)\n metrics = {\n 'best_val_f1_score': self.best_f1_score,\n 'final_train_loss': self.last_train_loss if self.last_train_loss else 0,\n 'final_valid_loss': self.last_valid_loss if self.last_valid_loss else 0,\n 'test_f1_score': test_f1,\n 'test_loss': test_loss\n }\n self.tensorBoardWriter.add_hparams(hparams, metrics)\n\n def log_gradients(self):\n \"\"\"\n Log the gradients of model parameters to TensorBoard.\n This is done periodically based on the current epoch to monitor training progress and diagnose issues.\n \"\"\"\n if self.current_epoch % 5 == 0: # Choose an interval that makes sense for your training regimen.\n for name, param in self.model.named_parameters():\n self.tensorBoardWriter.add_histogram(f'Parameters/{name}', param, self.current_epoch)\n if param.grad is not None:\n self.tensorBoardWriter.add_histogram(f'Gradients/{name}', param.grad, self.current_epoch)\n \n def check_early_stopping(self):\n \"\"\"\n Check if early stopping criteria are met based on the validation F1 score.\n If the score has not improved by a certain proportion over the patience window,\n trigger early stopping.\n\n Returns:\n bool: True if early stopping is triggered, False otherwise.\n \"\"\"\n improvement_threshold = self.config.early_stopping_threshold\n significant_improvement = False\n if self.last_valid_f1 > self.best_f1_score:\n logger.info(f\"Validation F1 Score improved from {self.best_f1_score:.4f} to {self.last_valid_f1:.4f}\")\n self.best_f1_score = self.last_valid_f1\n self.best_model_state = {\n 'epoch': self.current_epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'loss': self.criterion,\n 'f1_score': self.best_f1_score,\n 'model_name': self.config.model_name,\n 'requires_grad': self.config.model_requires_grad,\n 'num_classes': self.config.num_classes,\n 'dropout': self.config.model_dropout_prob,\n 'embedding_layer': self.config.embedding_layer_enabled,\n 'gcn_enabled': self.config.gcn_enabled,\n 'batch_size': self.config.batch_size,\n 'optimizer': 'Adam',\n 'loss_function': 'BCEWithLogitsLoss'\n }\n\n modelloadingutils.save_best_model(self.best_model_state)\n\n # Check for 
significant improvement since the last reset of the patience counter\n if self.last_valid_f1 - self.best_f1_score_at_last_reset >= improvement_threshold:\n logger.info(f\"Significant cumulative improvement of {self.last_valid_f1 - self.best_f1_score_at_last_reset:.4f} has been achieved since the last reset.\")\n significant_improvement = True\n self.best_f1_score_at_last_reset = self.last_valid_f1\n self.patience_counter = 0\n \n # Increment patience counter if no significant improvement\n if not significant_improvement:\n self.patience_counter += 1\n\n # If there hasn't been significant improvement over the patience window, trigger early stopping\n if self.patience_counter >= self.patience:\n logger.info(f\"Early stopping triggered after {self.patience} epochs without significant cumulative improvement.\")\n return True\n\n \n def save_final_model(self):\n \"\"\"\n Save the state of the model that achieved the best validation F1 score during training.\n The model state is saved to a file defined by the configuration.\n \"\"\"\n state_to_save = copy.deepcopy(self.best_model_state)\n modelloadingutils.save_final_model(self.best_model_state, self.best_f1_score, self.config)\n self.model.load_state_dict(state_to_save['model_state_dict'])\n\n def compute_label_frequencies(self):\n \"\"\"\n Computes the frequency of each label in the dataset.\n \n Returns:\n label_freqs (torch.Tensor): Tensor containing the frequency of each label.\n \"\"\"\n # Initialize a tensor to hold the frequency of each label.\n # This assumes that the number of labels is known and stored in `self.config.num_classes`.\n label_freqs = torch.zeros(self.config.num_classes, dtype=torch.float)\n\n # Iterate over the dataset and sum the one-hot encoded labels.\n for batch in tqdm(self.trainloader, total=len(self.trainloader)):\n labels = batch[\"label\"]\n label_freqs += labels.sum(dim=0) # Sum along the batch dimension.\n\n # Ensure that there's at least one count for each label to avoid division by zero.\n label_freqs = label_freqs.clamp(min=1) \n return label_freqs\n \n def compute_loss_weights(self, label_freqs):\n \"\"\"\n Computes the weights for each label to be used in the loss function.\n \n Parameters:\n label_freqs (torch.Tensor): Tensor containing the frequency of each label.\n \n Returns:\n weights (torch.Tensor): Tensor containing the weight for each label.\n \"\"\"\n # Compute the inverse frequency weights\n total_counts = label_freqs.sum()\n weights = total_counts / label_freqs\n \n # Normalize weights to prevent them from scaling the loss too much\n weights = weights / weights.mean()\n\n #weights = weights.view(-1) # Ensure it is a 1D tensor with shape [num_classes]\n #assert weights.shape[0] == self.config.num_classes, \"pos_weight must have the same size as num_classes\"\n \n return weights"
},
{
"identifier": "ModelEvaluator",
"path": "src/utils/evaluation/modelevaluator.py",
"snippet": "class ModelEvaluator:\n def __init__(self, model, criterion, device, tensorBoardWriter=None, config=config, model_data=None):\n \"\"\"\n Initializes the ModelEvaluator with a given model, loss criterion, device,\n optional TensorBoard writer, and configuration.\n\n Parameters:\n model (torch.nn.Module): The model to evaluate.\n criterion (function): The loss function.\n device (torch.device): The device to run evaluation on (CPU or GPU).\n tensorBoardWriter (TensorBoardWriter, optional): Writer for TensorBoard logging.\n config (object): An immutable configuration object with necessary parameters.\n \"\"\"\n self.model = model\n self.config = config\n self.criterion = criterion\n self.device = device\n self.num_classes = config.num_classes\n self.tensorBoardWriter = tensorBoardWriter\n self.model_data = model_data\n\n def __enter__(self):\n \"\"\"\n Context management method to use with 'with' statements.\n \"\"\"\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Context management method to close the TensorBoard writer upon exiting the 'with' block.\n \"\"\"\n if self.tensorBoardWriter:\n self.tensorBoardWriter.close_writer()\n del self.model\n torch.cuda.empty_cache()\n gc.collect()\n\n @classmethod\n def from_trainer(cls, model_trainer):\n \"\"\"\n Creates a ModelEvaluator instance from a ModelTrainer instance by extracting\n the relevant attributes.\n\n Parameters:\n model_trainer (ModelTrainer): The trainer instance to extract attributes from.\n\n Returns:\n ModelEvaluator: A new instance of ModelEvaluator.\n \"\"\"\n return cls(\n model=model_trainer.model,\n criterion=model_trainer.criterion,\n device=model_trainer.device,\n config=model_trainer.config,\n tensorBoardWriter=model_trainer.tensorBoardWriter,\n model_data=model_trainer.best_model_state\n )\n \n @classmethod\n def from_file(cls, device, thisconfig, tensorBoardWriter=None):\n \"\"\"\n Creates a ModelEvaluator instance from a model file by loading in the model and preparing it\n to be run.\n\n Parameters:\n device (torch.device): The device to run evaluation on (CPU or GPU).\n tensorBoardWriter (TensorBoardWriter, optional): Writer for TensorBoard logging.\n config (object): An immutable configuration object with necessary parameters.\n \"\"\"\n \n model = modelfactory.create_model(thisconfig).to(device)\n criterion = nn.BCEWithLogitsLoss()\n\n modelToLoadPath = pathutils.get_model_to_load_path(thisconfig)\n if os.path.exists(modelToLoadPath):\n logger.info(\"Loading the best model...\") \n modelData = modelloadingutils.load_model(modelToLoadPath, thisconfig)\n model.load_state_dict(modelData['model_state_dict'])\n else:\n logger.error(f\"Could not find a model at path: {modelToLoadPath}\")\n raise ValueError(f\"Could not find a model at path: {modelToLoadPath}. 
Check to ensure the config/json value for model_name_to_load is correct!\")\n \n return cls(\n model=model,\n criterion=criterion,\n device=device,\n config=thisconfig,\n tensorBoardWriter=tensorBoardWriter,\n model_data=modelData\n )\n \n def single_image_prediction(self, preprocessed_image, threshold=None):\n \"\"\"Run a prediction for a single preprocessed image.\"\"\"\n self.model.eval() # Set the model to evaluation mode\n \n # Move the preprocessed image to the same device as the model\n preprocessed_image = preprocessed_image.to(self.device)\n \n with torch.no_grad():\n # Add a batch dimension to the image tensor\n image_batch = preprocessed_image.unsqueeze(0)\n outputs = self.model(image_batch)\n if threshold is not None:\n # Move the outputs to the CPU and convert to NumPy before thresholding\n outputs_np = outputs.cpu().numpy()\n outputs_np = metricutils.getpredictions_with_threshold(outputs_np, threshold)\n # Wrap the NumPy array back into a PyTorch tensor if necessary\n outputs = torch.from_numpy(outputs_np)\n # Remove the batch dimension from the outputs before returning\n outputs = outputs.squeeze(0)\n return outputs\n \n def predict(self, data_loader, return_true_labels=True, threshold=None):\n \"\"\"\n Perform inference on the given data_loader and return raw predictions.\n\n Parameters:\n data_loader (DataLoader): DataLoader for inference.\n return_true_labels (bool): If true, return true labels. Otherwise, skip label processing.\n\n Returns:\n prediction_labels (numpy.ndarray): Raw model outputs.\n true_labels (numpy.ndarray, optional): Corresponding true labels, if available and requested.\n avg_loss (float, optional): Average loss over dataset, if labels are available.\n \"\"\"\n self.model.eval() # Set the model to evaluation mode\n prediction_outputs = [] # List to store all raw model outputs\n true_labels = [] # List to store all labels if they are available\n image_paths = [] # List to store all image paths if they are available\n frame_counts = [] # List to store all frame counts if they are available\n total_loss = 0.0 # Initialize total loss\n\n with torch.no_grad(): # Disable gradient calculation for efficiency\n for batch in tqdm(data_loader, total=len(data_loader)):\n images = batch['image'].to(self.device)\n outputs = self.model(images)\n prediction_outputs.append(outputs.cpu().numpy()) # Store raw model outputs\n \n # Process labels if they are available and requested\n if return_true_labels and 'label' in batch:\n labels = batch['label'].to(self.device)\n loss = self.criterion(outputs, labels.float()) # Calculate loss\n total_loss += loss.item() # Accumulate loss\n true_labels.append(labels.cpu().numpy()) # Store labels\n elif not return_true_labels and 'image_path' in batch:\n image_paths.append(batch['image_path'])\n elif not return_true_labels and 'frame_count' in batch:\n frame_counts.append(batch['frame_count'])\n\n # Concatenate all raw outputs and optionally labels from all batches\n prediction_outputs = np.vstack(prediction_outputs)\n results = {'predictions': prediction_outputs}\n \n if return_true_labels and true_labels:\n true_labels = np.vstack(true_labels)\n avg_loss = total_loss / len(data_loader.dataset)\n results['true_labels'] = true_labels\n results['avg_loss'] = avg_loss\n\n if image_paths:\n results['image_paths'] = image_paths\n\n if frame_counts:\n results['frame_counts'] = frame_counts\n\n if threshold != None:\n predictions_binary = metricutils.getpredictions_with_threshold(prediction_outputs, threshold)\n results['predictions'] = 
predictions_binary\n\n return results\n\n def evaluate_predictions(self, data_loader, prediction_outputs, true_labels, epoch, average, datasetSubset=None, metricMode=None, threshold=None):\n \"\"\"\n Evaluate the model on the given data_loader.\n\n Parameters:\n data_loader (DataLoader): DataLoader for evaluation.\n prediction_outputs (numpy.ndarray): Raw model outputs.\n true_labels (numpy.ndarray): Corresponding true labels.\n epoch (int): The current epoch number, used for TensorBoard logging.\n datasetSubset (str): Indicates the subset of data evaluated (e.g., 'test', 'validation').\n average (str): Indicates the type of averaging to perform when computing metrics. Use None to get per-class metrics.\n metricMode (str, optional): Indicates from where this is being evaluated from (e.g., 'Train', 'Test').\n threshold (float, optional): The threshold value for binary predictions.\n\n Returns:\n f1_score (float): The F1 score of the model on the dataset.\n precision (float): The precision of the model on the dataset.\n recall (float): The recall of the model on the dataset.\n \"\"\"\n\n predictions_binary = metricutils.getpredictions_with_threshold(prediction_outputs, threshold)\n # Compute evaluation metrics\n precision, recall, f1 = metricutils.compute_metrics(true_labels, predictions_binary, average=average)\n # Log images with predictions to TensorBoard for a random batch, if configured\n if metricMode is not None and self.tensorBoardWriter is not None and datasetSubset is not None:\n random_batch_index = random.randint(0, len(data_loader) - 1)\n batch_dict = next(itertools.islice(data_loader, random_batch_index, None))\n images = batch_dict['image'] # Assuming the device transfer happens elsewhere if needed\n labels = batch_dict['label']\n \n start_index = random_batch_index * data_loader.batch_size\n end_index = min((random_batch_index + 1) * data_loader.batch_size, len(predictions_binary))\n\n selected_predictions = predictions_binary[start_index:end_index]\n selected_predictions_tensor = torch.tensor(selected_predictions, device=self.device, dtype=torch.float32)\n self.tensorBoardWriter.write_image_test_results(images, labels, selected_predictions_tensor, epoch, metricMode, datasetSubset)\n\n # Return the average loss and computed metrics\n return f1, precision, recall\n\n def evaluate(self, data_loader, epoch, datasetSubset, metricMode=None, average='micro', threshold=None):\n \"\"\"\n Evaluate the model on the given data_loader.\n\n Parameters:\n data_loader (DataLoader): DataLoader for evaluation.\n epoch (int): The current epoch number, used for TensorBoard logging.\n datasetSubset (str): Indicates the subset of data being evaluated (e.g., 'test', 'validation').\n average (str): Indicates the type of averaging to perform when computing metrics. 
Use None to get per-class metrics.\n metricMode (str, optional): Indicates from where this is being evaluated from (e.g., 'Train', 'Test').\n threshold (float, optional): The threshold value for binary predictions.\n\n Returns:\n avg_loss (float): The average loss over the dataset.\n f1_score (float): The F1 score of the model on the dataset.\n precision (float): The precision of the model on the dataset.\n recall (float): The recall of the model on the dataset.\n \"\"\"\n # Perform inference and get raw outputs\n prediction_results = self.predict(data_loader)\n all_outputs, all_labels, avg_loss = prediction_results['predictions'], prediction_results['true_labels'], prediction_results['avg_loss']\n\n f1, precision, recall = self.evaluate_predictions(data_loader, all_outputs, all_labels, epoch, average, datasetSubset, metricMode, threshold)\n\n # Return the average loss and computed metrics\n return avg_loss, f1, precision, recall"
},
{
"identifier": "evaluate_model",
"path": "src/utils/evaluation/test_model.py",
"snippet": "def evaluate_model(this_config=config):\n # initialize the computation device\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n test_loader = datasetutils.get_data_loader_by_name(\"test\", config=this_config)\n valid_loader = datasetutils.get_data_loader_by_name(\"valid\", config=this_config)\n valid_test_loader = datasetutils.get_data_loader_by_name(\"valid+test\", config=this_config, shuffle=True)\n\n # intialize the model\n with ModelEvaluator.from_file(device, this_config, TensorBoardWriter(config=this_config)) as modelEvaluator:\n epochs = modelEvaluator.model_data[\"epoch\"]\n\n valid_start_time = time.time()\n valid_results = modelEvaluator.predict(valid_loader)\n valid_end_time = time.time()\n\n test_start_time = time.time()\n test_results = modelEvaluator.predict(test_loader)\n test_end_time = time.time()\n\n valid_test_start_time = time.time() \n validtest_results = modelEvaluator.predict(valid_test_loader)\n valid_test_end_time = time.time()\n\n\n valid_predictions, valid_correct_labels, valid_loss = valid_results['predictions'], valid_results['true_labels'], valid_results['avg_loss']\n test_predictions, test_correct_labels, test_loss = test_results['predictions'], test_results['true_labels'], test_results['avg_loss']\n validtest_predictions, validtest_correct_labels, validtest_loss = validtest_results['predictions'], validtest_results['true_labels'], validtest_results['avg_loss']\n\n valid_elapsed_time = valid_end_time - valid_start_time\n test_elapsed_time = test_end_time - test_start_time\n valid_test_elapsed_time = valid_test_end_time - valid_test_start_time\n\n valid_num_images = len(valid_loader.dataset)\n test_num_images = len(test_loader.dataset)\n valid_test_num_images = len(valid_test_loader.dataset)\n\n valid_images_per_second = valid_num_images / valid_elapsed_time\n test_images_per_second = test_num_images / test_elapsed_time\n valid_test_images_per_second = valid_test_num_images / valid_test_elapsed_time\n\n avg_images_per_second = (valid_images_per_second + test_images_per_second + valid_test_images_per_second) / 3\n\n logger.info(f\"Validation Img/sec: {valid_images_per_second}\")\n logger.info(f\"Test Img/sec: {test_images_per_second}\")\n logger.info(f\"Validation+Test Img/sec: {valid_test_images_per_second}\")\n logger.info(f\"Avg Img/sec: {avg_images_per_second}\")\n\n logger.info(f\"Validation Loss: {valid_loss}\")\n logger.info(f\"Test Loss: {test_loss}\")\n logger.info(f\"Validation+Test Loss: {validtest_loss}\")\n\n val_f1_default, val_precision_default, val_recall_default = modelEvaluator.evaluate_predictions(valid_loader, valid_predictions, valid_correct_labels, epochs, threshold=0.5, average=\"micro\")\n test_f1_default, test_precision_default, test_recall_default = modelEvaluator.evaluate_predictions(test_loader, test_predictions, test_correct_labels, epochs, threshold=0.5, average=\"micro\")\n validtest_f1_default, validtest_precision_default, validtest_recall_default = modelEvaluator.evaluate_predictions(valid_test_loader, validtest_predictions, validtest_correct_labels, epochs, threshold=0.5, average=\"micro\")\n\n logger.info(f\"Validation Default F1: F1: {val_f1_default}, Precision: {val_precision_default}, Recall: {val_recall_default} at Threshold: 0.5\")\n logger.info(f\"Test Default F1: F1: {test_f1_default}, Precision: {test_precision_default}, Recall: {test_recall_default} at Threshold: 0.5\")\n logger.info(f\"Valid+Test Default F1: F1: {validtest_f1_default}, Precision: {validtest_precision_default}, 
Recall: {validtest_recall_default} at Threshold: 0.5\")\n\n\n val_best_f1_threshold, val_f1_valoptimized, val_precision_valoptimized, val_recall_valoptimized = metricutils.find_best_threshold(valid_predictions, valid_correct_labels, \"f1\")\n logger.info(f\"Validation Best F1: F1: {val_f1_valoptimized}, Precision: {val_precision_valoptimized}, Recall: {val_recall_valoptimized} at Threshold:{val_best_f1_threshold}\")\n test_f1_valoptimized, test_precision_valoptimized, test_recall_valoptimized = modelEvaluator.evaluate_predictions(test_loader, test_predictions, test_correct_labels, epochs, threshold=val_best_f1_threshold, average=\"micro\", datasetSubset=\"Test\", metricMode=\"Test\")\n validtest_f1_valoptimized, validtest_precision_valoptimized, validtest_recall_valoptimized = modelEvaluator.evaluate_predictions(valid_test_loader, validtest_predictions, validtest_correct_labels, epochs, threshold=val_best_f1_threshold, average=\"micro\")\n logger.info(f\"Test Best F1 (measured from Val): F1: {test_f1_valoptimized}, Precision: {test_precision_valoptimized}, Recall: {test_recall_valoptimized} at Threshold:{val_best_f1_threshold}\")\n logger.info(f\"Valid+Test Best F1 (measured from Val): F1: {validtest_f1_valoptimized}, Precision: {validtest_precision_valoptimized}, Recall: {validtest_recall_valoptimized} at Threshold:{val_best_f1_threshold}\")\n\n best_f1_thresholds_per_class = metricutils.find_best_thresholds_per_class(valid_predictions, valid_correct_labels)\n test_f1_valoptimizedperclass, test_precision_valoptimizedperclass, test_recall_valoptimizedperclass = modelEvaluator.evaluate_predictions(test_loader, test_predictions, test_correct_labels, epochs, threshold=best_f1_thresholds_per_class, average=\"micro\")\n logger.info(f\"Test Best F1 Per Class (Val Optimized): F1: {test_f1_valoptimizedperclass}, Precision: {test_precision_valoptimizedperclass}, Recall: {test_recall_valoptimizedperclass} at Threshold:{best_f1_thresholds_per_class}\")\n\n hparams = metricutils.filter_dict_for_hparams(modelEvaluator.model_data)\n final_metrics = {\n 'F1/Default/Validation': val_f1_default,\n 'F1/Default/Test': test_f1_default,\n 'F1/Default/Valid+Test': validtest_f1_default,\n 'F1/ValOptimizedThreshold/Validation': val_f1_valoptimized,\n 'F1/ValOptimizedThreshold/Test': test_f1_valoptimized,\n 'F1/ValOptimizedThreshold/Valid+Test': validtest_f1_valoptimized,\n 'Precision/Default/Validation': val_precision_default,\n 'Precision/Default/Test': test_precision_default,\n 'Precision/Default/Valid+Test': validtest_precision_default,\n 'Precision/ValOptimizedThreshold/Validation': val_precision_valoptimized,\n 'Precision/ValOptimizedThreshold/Test': test_precision_valoptimized,\n 'Precision/ValOptimizedThreshold/Valid+Test': validtest_precision_valoptimized,\n 'Recall/Default/Validation': val_recall_default,\n 'Recall/Default/Test': test_recall_default,\n 'Recall/Default/Valid+Test': validtest_recall_default,\n 'Recall/ValOptimizedThreshold/Validation': val_recall_valoptimized,\n 'Recall/ValOptimizedThreshold/Test': test_recall_valoptimized,\n 'Recall/ValOptimizedThreshold/Valid+Test': validtest_recall_valoptimized,\n 'F1/ValOptimizedThresholdPerClass/Test': test_f1_valoptimizedperclass,\n 'Precision/ValOptimizedThresholdPerClass/Test': test_precision_valoptimizedperclass,\n 'Recall/ValOptimizedThresholdPerClass/Test': test_recall_valoptimizedperclass,\n 'ImagesPerSecond/Validation': valid_images_per_second,\n 'ImagesPerSecond/Test': test_images_per_second,\n 'ImagesPerSecond/Valid+Test': 
valid_test_images_per_second,\n 'ImagesPerSecond/Average': avg_images_per_second\n }\n modelEvaluator.tensorBoardWriter.add_scalars_from_dict(final_metrics, epochs)\n modelEvaluator.tensorBoardWriter.add_hparams(hparams, final_metrics)\n\n test_f1s_per_class, _, _ = modelEvaluator.evaluate_predictions(test_loader, test_predictions, test_correct_labels, epochs, threshold=val_best_f1_threshold, average=None)\n tagmappings = datasetutils.get_index_to_tag_mapping()\n for class_index in range(this_config.num_classes):\n modelEvaluator.tensorBoardWriter.add_scalar(f'F1_Class_{tagmappings[class_index]}/ValOptimizedThreshold/Test', test_f1s_per_class[class_index], epochs)"
}
] | from config import config
from src.utils.logging.loggerfactory import LoggerFactory
from src.utils.training.modeltrainer import ModelTrainer
from src.utils.evaluation.modelevaluator import ModelEvaluator
from src.utils.evaluation.test_model import evaluate_model
import torch
import utils.dataset.datasetutils as datasetutils | 8,344 | logger = LoggerFactory.get_logger(f"logger.{__name__}")
def train_model(config=config):
"""
Train a model based on the provided configuration.
Parameters:
config: Configuration module with necessary attributes.
"""
# Initialize the computation device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get train, validation, and test dataset loaders
train_loader, valid_loader, test_loader = datasetutils.get_train_valid_test_loaders(config=config)
# Initialize the model trainer
| logger = LoggerFactory.get_logger(f"logger.{__name__}")
def train_model(config=config):
"""
Train a model based on the provided configuration.
Parameters:
config: Configuration module with necessary attributes.
"""
# Initialize the computation device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get train, validation, and test dataset loaders
train_loader, valid_loader, test_loader = datasetutils.get_train_valid_test_loaders(config=config)
# Initialize the model trainer | with ModelTrainer(device, train_loader, valid_loader, test_loader, config=config) as modelTrainer, ModelEvaluator.from_trainer(modelTrainer) as modelEvaluator: | 1 | 2023-12-25 18:45:52+00:00 | 12k |
jpivarski/ragged | src/ragged/_spec_utility_functions.py | [
{
"identifier": "_box",
"path": "src/ragged/_spec_array_object.py",
"snippet": "def _box(\n cls: type[array],\n output: ak.Array | np.number | SupportsDLPack,\n *,\n dtype: None | Dtype = None,\n) -> array:\n if isinstance(output, ak.Array):\n impl = output\n shape, dtype_observed = _shape_dtype(output.layout)\n if dtype is not None and dtype != dtype_observed:\n impl = ak.values_astype(impl, dtype)\n else:\n dtype = dtype_observed\n device = ak.backend(output)\n\n elif isinstance(output, np.number):\n impl = np.array(output)\n shape = output.shape\n dtype_observed = output.dtype\n if dtype is not None and dtype != dtype_observed:\n impl = impl.astype(dtype)\n else:\n dtype = dtype_observed\n device = \"cpu\"\n\n else:\n impl = output\n shape = output.shape # type: ignore[union-attr]\n dtype_observed = output.dtype # type: ignore[union-attr]\n if dtype is not None and dtype != dtype_observed:\n impl = impl.astype(dtype)\n else:\n dtype = dtype_observed\n device = \"cpu\" if isinstance(output, np.ndarray) else \"cuda\"\n\n return cls._new(impl, shape, dtype, device) # pylint: disable=W0212"
},
{
"identifier": "_unbox",
"path": "src/ragged/_spec_array_object.py",
"snippet": "def _unbox(*inputs: array) -> tuple[ak.Array | SupportsDLPack, ...]:\n if len(inputs) > 1 and any(type(inputs[0]) is not type(x) for x in inputs):\n types = \"\\n\".join(f\"{type(x).__module__}.{type(x).__name__}\" for x in inputs)\n msg = f\"mixed array types: {types}\"\n raise TypeError(msg)\n\n return tuple(x._impl for x in inputs) # pylint: disable=W0212"
},
{
"identifier": "array",
"path": "src/ragged/_spec_array_object.py",
"snippet": "class array: # pylint: disable=C0103\n \"\"\"\n Ragged array class and constructor.\n\n https://data-apis.org/array-api/latest/API_specification/array_object.html\n \"\"\"\n\n # Constructors, internal functions, and other methods that are unbound by\n # the Array API specification.\n\n _impl: ak.Array | SupportsDLPack # ndim > 0 ak.Array or ndim == 0 NumPy or CuPy\n _shape: Shape\n _dtype: Dtype\n _device: Device\n\n @classmethod\n def _new(cls, impl: ak.Array, shape: Shape, dtype: Dtype, device: Device) -> array:\n \"\"\"\n Simple/fast array constructor for internal code.\n \"\"\"\n\n out = cls.__new__(cls)\n out._impl = impl\n out._shape = shape\n out._dtype = dtype\n out._device = device\n return out\n\n def __init__(\n self,\n obj: (\n array\n | ak.Array\n | bool\n | int\n | float\n | complex\n | NestedSequence[bool | int | float | complex]\n | SupportsBufferProtocol\n | SupportsDLPack\n ),\n dtype: None | Dtype | type | str = None,\n device: None | Device = None,\n copy: None | bool = None,\n ):\n \"\"\"\n Primary array constructor, same as `ragged.asarray`.\n\n Args:\n obj: Object to be converted to an array. May be a Python scalar, a\n (possibly nested) sequence of Python scalars, or an object\n supporting the Python buffer protocol or DLPack.\n dtype: Output array data type. If `dtype` is `None`, the output\n array data type is inferred from the data type(s) in `obj`.\n If all input values are Python scalars, then, in order of\n precedence,\n - if all values are of type `bool`, the output data type is\n `bool`.\n - if all values are of type `int` or are a mixture of `bool`\n and `int`, the output data type is `np.int64`.\n - if one or more values are `complex` numbers, the output\n data type is `np.complex128`.\n - if one or more values are `float`s, the output data type\n is `np.float64`.\n device: Device on which to place the created array. If device is\n `None` and `obj` is an array, the output array device is\n inferred from `obj`. If `\"cpu\"`, the array is backed by NumPy\n and resides in main memory; if `\"cuda\"`, the array is backed by\n CuPy and resides in CUDA global memory.\n copy: Boolean indicating whether or not to copy the input. If `True`,\n this function always copies. If `False`, the function never\n copies for input which supports the buffer protocol and raises\n a ValueError in case a copy would be necessary. 
If `None`, the\n function reuses the existing memory buffer if possible and\n copies otherwise.\n \"\"\"\n\n if isinstance(obj, array):\n self._impl = obj._impl\n self._shape, self._dtype = obj._shape, obj._dtype\n\n elif isinstance(obj, ak.Array):\n self._impl = obj\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n\n elif hasattr(obj, \"__dlpack_device__\") and getattr(obj, \"shape\", None) == ():\n device_type, _ = obj.__dlpack_device__()\n if (\n isinstance(device_type, enum.Enum) and device_type.value == 1\n ) or device_type == 1:\n self._impl = np.array(obj)\n self._shape, self._dtype = (), self._impl.dtype\n elif (\n isinstance(device_type, enum.Enum) and device_type.value == 2\n ) or device_type == 2:\n cp = _import.cupy()\n self._impl = cp.array(obj)\n self._shape, self._dtype = (), self._impl.dtype\n else:\n msg = f\"unsupported __dlpack_device__ type: {device_type}\"\n raise TypeError(msg)\n\n elif isinstance(obj, (bool, numbers.Complex)):\n self._impl = np.array(obj)\n self._shape, self._dtype = (), self._impl.dtype\n\n else:\n self._impl = ak.Array(obj)\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n\n if dtype is not None and not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n\n if dtype is not None and dtype != self._dtype:\n if isinstance(self._impl, ak.Array):\n self._impl = ak.values_astype(self._impl, dtype)\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._impl = np.array(obj, dtype=dtype)\n self._dtype = dtype\n\n if self._dtype.fields is not None:\n msg = f\"dtype must not have fields: dtype.fields = {self._dtype.fields}\"\n raise TypeError(msg)\n\n if self._dtype.shape != ():\n msg = f\"dtype must not have a shape: dtype.shape = {self._dtype.shape}\"\n raise TypeError(msg)\n\n if self._dtype.type not in numeric_types:\n msg = f\"dtype must be numeric (bool, [u]int*, float*, complex*): dtype.type = {self._dtype.type}\"\n raise TypeError(msg)\n\n if device is not None:\n if isinstance(self._impl, ak.Array) and device != ak.backend(self._impl):\n self._impl = ak.to_backend(self._impl, device)\n elif isinstance(self._impl, np.ndarray) and device == \"cuda\":\n cp = _import.cupy()\n self._impl = cp.array(self._impl)\n\n assert copy is None, \"TODO\"\n\n def __str__(self) -> str:\n \"\"\"\n String representation of the array.\n \"\"\"\n\n if len(self._shape) == 0:\n return f\"{self._impl}\"\n elif len(self._shape) == 1:\n return f\"{ak._prettyprint.valuestr(self._impl, 1, 80)}\"\n else:\n prep = ak._prettyprint.valuestr(self._impl, 20, 80 - 4)[1:-1].replace(\n \"\\n \", \"\\n \"\n )\n return f\"[\\n {prep}\\n]\"\n\n def __repr__(self) -> str:\n \"\"\"\n REPL-string representation of the array.\n \"\"\"\n\n if len(self._shape) == 0:\n return f\"ragged.array({self._impl})\"\n elif len(self._shape) == 1:\n return f\"ragged.array({ak._prettyprint.valuestr(self._impl, 1, 80 - 14)})\"\n else:\n prep = ak._prettyprint.valuestr(self._impl, 20, 80 - 4)[1:-1].replace(\n \"\\n \", \"\\n \"\n )\n return f\"ragged.array([\\n {prep}\\n])\"\n\n def tolist(\n self,\n ) -> bool | int | float | complex | NestedSequence[bool | int | float | complex]:\n return self._impl.tolist() # type: ignore[no-any-return,union-attr]\n\n # Attributes: https://data-apis.org/array-api/latest/API_specification/array_object.html#attributes\n\n @property\n def dtype(self) -> Dtype:\n \"\"\"\n Data type of the array elements.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.dtype.html\n \"\"\"\n\n return 
self._dtype\n\n @property\n def device(self) -> Device:\n \"\"\"\n Hardware device the array data resides on.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.device.html\n \"\"\"\n\n return self._device\n\n @property\n def mT(self) -> array:\n \"\"\"\n Transpose of a matrix (or a stack of matrices).\n\n Raises:\n ValueError: If any ragged dimension's lists are not sorted from longest\n to shortest, which is the only way that left-aligned ragged\n transposition is possible.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.mT.html\n \"\"\"\n\n assert False, \"TODO 1\"\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Number of array dimensions (axes).\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.ndim.html\n \"\"\"\n\n return len(self._shape)\n\n @property\n def shape(self) -> Shape:\n \"\"\"\n Array dimensions.\n\n Regular dimensions are represented by `int` values in the `shape` and\n irregular (ragged) dimensions are represented by `None`.\n\n According to the specification, \"An array dimension must be `None` if\n and only if a dimension is unknown,\" which is a different\n interpretation than we are making here.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.shape.html\n \"\"\"\n\n return self._shape\n\n @property\n def size(self) -> None | int:\n \"\"\"\n Number of elements in an array.\n\n This property never returns `None` because we do not consider\n dimensions to be unknown, and numerical values within ragged\n lists can be counted.\n\n Example:\n An array like `ragged.array([[1.1, 2.2, 3.3], [], [4.4, 5.5]])` has\n a size of 5 because it contains 5 numerical values.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html\n \"\"\"\n\n if len(self._shape) == 0:\n return 1\n else:\n return int(ak.count(self._impl))\n\n @property\n def T(self) -> array:\n \"\"\"\n Transpose of the array.\n\n Raises:\n ValueError: If any ragged dimension's lists are not sorted from longest\n to shortest, which is the only way that left-aligned ragged\n transposition is possible.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.T.html\n \"\"\"\n\n assert False, \"TODO 2\"\n\n # methods: https://data-apis.org/array-api/latest/API_specification/array_object.html#methods\n\n def __abs__(self) -> array:\n \"\"\"\n Calculates the absolute value for each element of an array instance.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__abs__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n return ns.abs(self)\n\n def __add__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates the sum for each element of an array instance with the\n respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__add__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.add(self, other)\n\n def __and__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Evaluates `self_i & other_i` for each element of an array instance with\n the respective element of the array other.\n\n 
https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__and__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.bitwise_and(self, other)\n\n def __array_namespace__(self, *, api_version: None | str = None) -> Any:\n \"\"\"\n Returns an object that has all the array API functions on it.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__array_namespace__.html\n \"\"\"\n\n import ragged # pylint: disable=C0415,R0401\n\n if api_version is not None and api_version != ragged.__array_api_version__:\n msg = f\"api_version {api_version!r} is not implemented; {ragged.__array_api_version__ = }\"\n raise NotImplementedError(msg)\n\n return ragged\n\n def __bool__(self) -> bool: # FIXME pylint: disable=E0304\n \"\"\"\n Converts a zero-dimensional array to a Python `bool` object.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__bool__.html\n \"\"\"\n\n return bool(self._impl)\n\n def __complex__(self) -> complex:\n \"\"\"\n Converts a zero-dimensional array to a Python `complex` object.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__complex__.html\n \"\"\"\n\n return complex(self._impl) # type: ignore[arg-type]\n\n def __dlpack__(self, *, stream: None | int | Any = None) -> PyCapsule:\n \"\"\"\n Exports the array for consumption by `from_dlpack()` as a DLPack\n capsule.\n\n Args:\n stream: CuPy Stream object (https://docs.cupy.dev/en/stable/reference/generated/cupy.cuda.Stream.html)\n if not `None`.\n\n Raises:\n ValueError: If any dimensions are ragged.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__dlpack__.html\n \"\"\"\n\n assert stream, \"TODO\"\n assert False, \"TODO 9\"\n\n def __dlpack_device__(self) -> tuple[enum.Enum, int]:\n \"\"\"\n Returns device type and device ID in DLPack format.\n\n Raises:\n ValueError: If any dimensions are ragged.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__dlpack_device__.html\n \"\"\"\n\n assert False, \"TODO 10\"\n\n def __eq__(self, other: int | float | bool | array, /) -> array: # type: ignore[override]\n \"\"\"\n Computes the truth value of `self_i == other_i` for each element of an\n array instance with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__eq__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.equal(self, other)\n\n def __float__(self) -> float:\n \"\"\"\n Converts a zero-dimensional array to a Python `float` object.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__float__.html\n \"\"\"\n\n return float(self._impl) # type: ignore[arg-type]\n\n def __floordiv__(self, other: int | float | array, /) -> array:\n \"\"\"\n Evaluates `self_i // other_i` for each element of an array instance\n with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__floordiv__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = 
array(other, device=self._device)\n\n return ns.floor_divide(self, other)\n\n def __ge__(self, other: int | float | array, /) -> array:\n \"\"\"\n Computes the truth value of `self_i >= other_i` for each element of an\n array instance with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__ge__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.greater_equal(self, other)\n\n def __getitem__(self, key: GetSliceKey, /) -> array:\n \"\"\"\n Returns self[key].\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__getitem__.html\n \"\"\"\n\n assert False, \"TODO 15\"\n\n def __gt__(self, other: int | float | array, /) -> array:\n \"\"\"\n Computes the truth value of `self_i > other_i` for each element of an\n array instance with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__gt__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.greater(self, other)\n\n def __index__(self) -> int: # FIXME pylint: disable=E0305\n \"\"\"\n Converts a zero-dimensional integer array to a Python `int` object.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__index__.html\n \"\"\"\n\n return self._impl.__index__() # type: ignore[no-any-return, union-attr]\n\n def __int__(self) -> int:\n \"\"\"\n Converts a zero-dimensional array to a Python `int` object.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__int__.html\n \"\"\"\n\n return int(self._impl) # type: ignore[arg-type]\n\n def __invert__(self) -> array:\n \"\"\"\n Evaluates `~self_i` for each element of an array instance.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__invert__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n return ns.bitwise_invert(self)\n\n def __le__(self, other: int | float | array, /) -> array:\n \"\"\"\n Computes the truth value of `self_i <= other_i` for each element of an\n array instance with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__le__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.less_equal(self, other)\n\n def __lshift__(self, other: int | array, /) -> array:\n \"\"\"\n Evaluates `self_i << other_i` for each element of an array instance\n with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__lshift__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.bitwise_left_shift(self, other)\n\n def __lt__(self, other: int | float | array, /) -> array:\n \"\"\"\n Computes the truth value of `self_i < other_i` for each element of an\n array instance with the respective element of the 
array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__lt__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.less(self, other)\n\n def __matmul__(self, other: array, /) -> array:\n \"\"\"\n Computes the matrix product.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__matmul__.html\n \"\"\"\n\n assert False, \"TODO 22\"\n\n def __mod__(self, other: int | float | array, /) -> array:\n \"\"\"\n Evaluates `self_i % other_i` for each element of an array instance with\n the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__mod__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.remainder(self, other)\n\n def __mul__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates the product for each element of an array instance with the\n respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__mul__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.multiply(self, other)\n\n def __ne__(self, other: int | float | bool | array, /) -> array: # type: ignore[override]\n \"\"\"\n Computes the truth value of `self_i != other_i` for each element of an\n array instance with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__ne__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.not_equal(self, other)\n\n def __neg__(self) -> array:\n \"\"\"\n Evaluates `-self_i` for each element of an array instance.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__neg__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n return ns.negative(self)\n\n def __or__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Evaluates `self_i | other_i` for each element of an array instance with\n the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__or__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.bitwise_or(self, other)\n\n def __pos__(self) -> array:\n \"\"\"\n Evaluates `+self_i` for each element of an array instance.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__pos__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n return ns.positive(self)\n\n def __pow__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates an implementation-dependent approximation of exponentiation\n by raising each element (the base) of an array instance to the power of\n 
`other_i` (the exponent), where `other_i` is the corresponding element\n of the array `other`.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__pow__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.pow(self, other)\n\n def __rshift__(self, other: int | array, /) -> array:\n \"\"\"\n Evaluates `self_i >> other_i` for each element of an array instance\n with the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__rshift__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.bitwise_right_shift(self, other)\n\n def __setitem__(\n self, key: SetSliceKey, value: int | float | bool | array, /\n ) -> None:\n \"\"\"\n Sets `self[key]` to value.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__setitem__.html\n \"\"\"\n\n assert False, \"TODO 31\"\n\n def __sub__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates the difference for each element of an array instance with\n the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__sub__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.subtract(self, other)\n\n def __truediv__(self, other: int | float | array, /) -> array:\n \"\"\"\n Evaluates `self_i / other_i` for each element of an array instance with\n the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__truediv__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.divide(self, other)\n\n def __xor__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Evaluates `self_i ^ other_i` for each element of an array instance with\n the respective element of the array other.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__xor__.html\n \"\"\"\n\n from ragged import ( # pylint: disable=C0415,R0401\n _spec_elementwise_functions as ns,\n )\n\n if not isinstance(other, array):\n other = array(other, device=self._device)\n\n return ns.bitwise_xor(self, other)\n\n def to_device(self, device: Device, /, *, stream: None | int | Any = None) -> array:\n \"\"\"\n Copy the array from the device on which it currently resides to the\n specified device.\n\n Args:\n device: If `\"cpu\"`, the array is backed by NumPy and resides in\n main memory; if `\"cuda\"`, the array is backed by CuPy and\n resides in CUDA global memory.\n stream: CuPy Stream object (https://docs.cupy.dev/en/stable/reference/generated/cupy.cuda.Stream.html)\n for `device=\"cuda\"`.\n\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.to_device.html\n \"\"\"\n\n if isinstance(self._impl, ak.Array):\n if device != ak.backend(self._impl):\n assert stream is None, \"TODO\"\n impl = ak.to_backend(self._impl, device)\n else:\n impl = self._impl\n\n elif 
isinstance(self._impl, np.ndarray):\n # self._impl is a NumPy 0-dimensional array\n if device == \"cuda\":\n assert stream is None, \"TODO\"\n cp = _import.cupy()\n impl = cp.array(self._impl)\n else:\n impl = self._impl\n\n else:\n # self._impl is a CuPy 0-dimensional array\n impl = self._impl.get() if device == \"cpu\" else self._impl # type: ignore[union-attr]\n\n return self._new(impl, self._shape, self._dtype, device)\n\n # in-place operators: https://data-apis.org/array-api/2022.12/API_specification/array_object.html#in-place-operators\n\n def __iadd__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self + other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self + other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __isub__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self - other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self - other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __imul__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self * other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self * other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __itruediv__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self / other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self / other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __ifloordiv__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self // other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self // other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __ipow__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self ** other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self**other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: 
ignore[union-attr]\n return self\n\n def __imod__(self, other: int | float | array, /) -> array:\n \"\"\"\n Calculates `self = self % other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self % other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __imatmul__(self, other: array, /) -> array:\n \"\"\"\n Calculates `self = self @ other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self @ other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __iand__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Calculates `self = self & other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self & other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __ior__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Calculates `self = self | other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self | other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __ixor__(self, other: int | bool | array, /) -> array:\n \"\"\"\n Calculates `self = self ^ other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self ^ other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __ilshift__(self, other: int | array, /) -> array:\n \"\"\"\n Calculates `self = self << other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self << other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n def __irshift__(self, other: int | array, /) -> array:\n \"\"\"\n Calculates `self = self >> other` in-place.\n\n (Internal arrays are immutable; this only replaces the array that the\n Python object points to.)\n \"\"\"\n\n out = self >> other\n self._impl, self._device = out._impl, out._device\n if isinstance(self._impl, ak.Array):\n self._shape, self._dtype = _shape_dtype(self._impl.layout)\n else:\n self._shape, self._dtype = (), self._impl.dtype # type: ignore[union-attr]\n return self\n\n # reflected 
operators: https://data-apis.org/array-api/2022.12/API_specification/array_object.html#reflected-operators\n\n __radd__ = __add__\n __rsub__ = __sub__\n __rmul__ = __mul__\n __rtruediv__ = __truediv__\n __rfloordiv__ = __floordiv__\n __rpow__ = __pow__\n __rmod__ = __mod__\n __rmatmul__ = __matmul__\n __rand__ = __and__\n __ror__ = __or__\n __rxor__ = __xor__\n __rlshift__ = __lshift__\n __rrshift__ = __rshift__"
},
{
"identifier": "_regularize_axis",
"path": "src/ragged/_spec_statistical_functions.py",
"snippet": "def _regularize_axis(\n axis: None | int | tuple[int, ...], ndim: int\n) -> None | tuple[int, ...]:\n if axis is None:\n return axis\n elif isinstance(axis, numbers.Integral):\n out = axis + ndim if axis < 0 else axis # type: ignore[operator]\n if not 0 <= out < ndim:\n msg = f\"axis {axis} is out of bounds for an array with {ndim} dimensions\"\n raise ak.errors.AxisError(msg)\n return out # type: ignore[no-any-return]\n else:\n out = []\n for x in axis: # type: ignore[union-attr]\n out.append(x + ndim if x < 0 else x)\n if not 0 < out[-1] < ndim:\n msg = f\"axis {x} is out of bounds for an array with {ndim} dimensions\"\n if len(out) == 0:\n msg = \"at least one axis must be specified\"\n raise ak.errors.AxisError(msg)\n return tuple(sorted(out))"
}
] | import numpy as np
from ._spec_array_object import _box, _unbox, array
from ._spec_statistical_functions import _regularize_axis | 10,729 | # BSD 3-Clause License; see https://github.com/scikit-hep/ragged/blob/main/LICENSE
"""
https://data-apis.org/array-api/latest/API_specification/utility_functions.html
"""
from __future__ import annotations
def all( # pylint: disable=W0622
x: array, /, *, axis: None | int | tuple[int, ...] = None, keepdims: bool = False
) -> array:
"""
Tests whether all input array elements evaluate to `True` along a specified
axis.
Args:
x: Input array.
axis: Axis or axes along which to perform a logical AND reduction. By
default, a logical AND reduction is performed over the entire
array. If a tuple of integers, logical AND reductions are performed
over multiple axes. A valid `axis` must be an integer on the
interval `[-N, N)`, where `N` is the rank (number of dimensions) of
`x`. If an `axis` is specified as a negative integer, the function
must determine the axis along which to perform a reduction by
counting backward from the last dimension (where -1 refers to the
last dimension). If provided an invalid `axis`, the function raises
an exception.
keepdims: If `True`, the reduced axes (dimensions) are included in the
result as singleton dimensions, and, accordingly, the result is
broadcastable with the input array. Otherwise, if `False`, the
reduced axes (dimensions) are not included in the result.
Returns:
If a logical AND reduction was performed over the entire array, the
returned array is a zero-dimensional array containing the test result;
otherwise, the returned array is a non-zero-dimensional array
containing the test results. The returned array has data type
`np.bool_`.
https://data-apis.org/array-api/latest/API_specification/generated/array_api.all.html
"""
| # BSD 3-Clause License; see https://github.com/scikit-hep/ragged/blob/main/LICENSE
"""
https://data-apis.org/array-api/latest/API_specification/utility_functions.html
"""
from __future__ import annotations
def all( # pylint: disable=W0622
x: array, /, *, axis: None | int | tuple[int, ...] = None, keepdims: bool = False
) -> array:
"""
Tests whether all input array elements evaluate to `True` along a specified
axis.
Args:
x: Input array.
axis: Axis or axes along which to perform a logical AND reduction. By
default, a logical AND reduction is performed over the entire
array. If a tuple of integers, logical AND reductions are performed
over multiple axes. A valid `axis` must be an integer on the
interval `[-N, N)`, where `N` is the rank (number of dimensions) of
`x`. If an `axis` is specified as a negative integer, the function
must determine the axis along which to perform a reduction by
counting backward from the last dimension (where -1 refers to the
last dimension). If provided an invalid `axis`, the function raises
an exception.
keepdims: If `True`, the reduced axes (dimensions) are included in the
result as singleton dimensions, and, accordingly, the result is
broadcastable with the input array. Otherwise, if `False`, the
reduced axes (dimensions) are not included in the result.
Returns:
If a logical AND reduction was performed over the entire array, the
returned array is a zero-dimensional array containing the test result;
otherwise, the returned array is a non-zero-dimensional array
containing the test results. The returned array has data type
`np.bool_`.
https://data-apis.org/array-api/latest/API_specification/generated/array_api.all.html
"""
| axis = _regularize_axis(axis, x.ndim) | 3 | 2023-12-26 10:53:35+00:00 | 12k |
see2023/Bert-VITS2-ext | oldVersion/V111/text/chinese.py | [
{
"identifier": "punctuation",
"path": "oldVersion/V111/text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "oldVersion/V111/text/tone_sandhi.py",
"snippet": "class ToneSandhi:\n def __init__(self):\n self.must_neural_tone_words = {\n \"麻烦\",\n \"麻利\",\n \"鸳鸯\",\n \"高粱\",\n \"骨头\",\n \"骆驼\",\n \"马虎\",\n \"首饰\",\n \"馒头\",\n \"馄饨\",\n \"风筝\",\n \"难为\",\n \"队伍\",\n \"阔气\",\n \"闺女\",\n \"门道\",\n \"锄头\",\n \"铺盖\",\n \"铃铛\",\n \"铁匠\",\n \"钥匙\",\n \"里脊\",\n \"里头\",\n \"部分\",\n \"那么\",\n \"道士\",\n \"造化\",\n \"迷糊\",\n \"连累\",\n \"这么\",\n \"这个\",\n \"运气\",\n \"过去\",\n \"软和\",\n \"转悠\",\n \"踏实\",\n \"跳蚤\",\n \"跟头\",\n \"趔趄\",\n \"财主\",\n \"豆腐\",\n \"讲究\",\n \"记性\",\n \"记号\",\n \"认识\",\n \"规矩\",\n \"见识\",\n \"裁缝\",\n \"补丁\",\n \"衣裳\",\n \"衣服\",\n \"衙门\",\n \"街坊\",\n \"行李\",\n \"行当\",\n \"蛤蟆\",\n \"蘑菇\",\n \"薄荷\",\n \"葫芦\",\n \"葡萄\",\n \"萝卜\",\n \"荸荠\",\n \"苗条\",\n \"苗头\",\n \"苍蝇\",\n \"芝麻\",\n \"舒服\",\n \"舒坦\",\n \"舌头\",\n \"自在\",\n \"膏药\",\n \"脾气\",\n \"脑袋\",\n \"脊梁\",\n \"能耐\",\n \"胳膊\",\n \"胭脂\",\n \"胡萝\",\n \"胡琴\",\n \"胡同\",\n \"聪明\",\n \"耽误\",\n \"耽搁\",\n \"耷拉\",\n \"耳朵\",\n \"老爷\",\n \"老实\",\n \"老婆\",\n \"老头\",\n \"老太\",\n \"翻腾\",\n \"罗嗦\",\n \"罐头\",\n \"编辑\",\n \"结实\",\n \"红火\",\n \"累赘\",\n \"糨糊\",\n \"糊涂\",\n \"精神\",\n \"粮食\",\n \"簸箕\",\n \"篱笆\",\n \"算计\",\n \"算盘\",\n \"答应\",\n \"笤帚\",\n \"笑语\",\n \"笑话\",\n \"窟窿\",\n \"窝囊\",\n \"窗户\",\n \"稳当\",\n \"稀罕\",\n \"称呼\",\n \"秧歌\",\n \"秀气\",\n \"秀才\",\n \"福气\",\n \"祖宗\",\n \"砚台\",\n \"码头\",\n \"石榴\",\n \"石头\",\n \"石匠\",\n \"知识\",\n \"眼睛\",\n \"眯缝\",\n \"眨巴\",\n \"眉毛\",\n \"相声\",\n \"盘算\",\n \"白净\",\n \"痢疾\",\n \"痛快\",\n \"疟疾\",\n \"疙瘩\",\n \"疏忽\",\n \"畜生\",\n \"生意\",\n \"甘蔗\",\n \"琵琶\",\n \"琢磨\",\n \"琉璃\",\n \"玻璃\",\n \"玫瑰\",\n \"玄乎\",\n \"狐狸\",\n \"状元\",\n \"特务\",\n \"牲口\",\n \"牙碜\",\n \"牌楼\",\n \"爽快\",\n \"爱人\",\n \"热闹\",\n \"烧饼\",\n \"烟筒\",\n \"烂糊\",\n \"点心\",\n \"炊帚\",\n \"灯笼\",\n \"火候\",\n \"漂亮\",\n \"滑溜\",\n \"溜达\",\n \"温和\",\n \"清楚\",\n \"消息\",\n \"浪头\",\n \"活泼\",\n \"比方\",\n \"正经\",\n \"欺负\",\n \"模糊\",\n \"槟榔\",\n \"棺材\",\n \"棒槌\",\n \"棉花\",\n \"核桃\",\n \"栅栏\",\n \"柴火\",\n \"架势\",\n \"枕头\",\n \"枇杷\",\n \"机灵\",\n \"本事\",\n \"木头\",\n \"木匠\",\n \"朋友\",\n \"月饼\",\n \"月亮\",\n \"暖和\",\n \"明白\",\n \"时候\",\n \"新鲜\",\n \"故事\",\n \"收拾\",\n \"收成\",\n \"提防\",\n \"挖苦\",\n \"挑剔\",\n \"指甲\",\n \"指头\",\n \"拾掇\",\n \"拳头\",\n \"拨弄\",\n \"招牌\",\n \"招呼\",\n \"抬举\",\n \"护士\",\n \"折腾\",\n \"扫帚\",\n \"打量\",\n \"打算\",\n \"打点\",\n \"打扮\",\n \"打听\",\n \"打发\",\n \"扎实\",\n \"扁担\",\n \"戒指\",\n \"懒得\",\n \"意识\",\n \"意思\",\n \"情形\",\n \"悟性\",\n \"怪物\",\n \"思量\",\n \"怎么\",\n \"念头\",\n \"念叨\",\n \"快活\",\n \"忙活\",\n \"志气\",\n \"心思\",\n \"得罪\",\n \"张罗\",\n \"弟兄\",\n \"开通\",\n \"应酬\",\n \"庄稼\",\n \"干事\",\n \"帮手\",\n \"帐篷\",\n \"希罕\",\n \"师父\",\n \"师傅\",\n \"巴结\",\n \"巴掌\",\n \"差事\",\n \"工夫\",\n \"岁数\",\n \"屁股\",\n \"尾巴\",\n \"少爷\",\n \"小气\",\n \"小伙\",\n \"将就\",\n \"对头\",\n \"对付\",\n \"寡妇\",\n \"家伙\",\n \"客气\",\n \"实在\",\n \"官司\",\n \"学问\",\n \"学生\",\n \"字号\",\n \"嫁妆\",\n \"媳妇\",\n \"媒人\",\n \"婆家\",\n \"娘家\",\n \"委屈\",\n \"姑娘\",\n \"姐夫\",\n \"妯娌\",\n \"妥当\",\n \"妖精\",\n \"奴才\",\n \"女婿\",\n \"头发\",\n \"太阳\",\n \"大爷\",\n \"大方\",\n \"大意\",\n \"大夫\",\n \"多少\",\n \"多么\",\n \"外甥\",\n \"壮实\",\n \"地道\",\n \"地方\",\n \"在乎\",\n \"困难\",\n \"嘴巴\",\n \"嘱咐\",\n \"嘟囔\",\n \"嘀咕\",\n \"喜欢\",\n \"喇嘛\",\n \"喇叭\",\n \"商量\",\n \"唾沫\",\n \"哑巴\",\n \"哈欠\",\n \"哆嗦\",\n \"咳嗽\",\n \"和尚\",\n \"告诉\",\n \"告示\",\n \"含糊\",\n \"吓唬\",\n \"后头\",\n \"名字\",\n \"名堂\",\n \"合同\",\n \"吆喝\",\n \"叫唤\",\n \"口袋\",\n \"厚道\",\n \"厉害\",\n \"千斤\",\n \"包袱\",\n \"包涵\",\n \"匀称\",\n \"勤快\",\n \"动静\",\n \"动弹\",\n \"功夫\",\n \"力气\",\n \"前头\",\n \"刺猬\",\n \"刺激\",\n \"别扭\",\n \"利落\",\n \"利索\",\n \"利害\",\n \"分析\",\n \"出息\",\n \"凑合\",\n \"凉快\",\n \"冷战\",\n \"冤枉\",\n \"冒失\",\n 
\"养活\",\n \"关系\",\n \"先生\",\n \"兄弟\",\n \"便宜\",\n \"使唤\",\n \"佩服\",\n \"作坊\",\n \"体面\",\n \"位置\",\n \"似的\",\n \"伙计\",\n \"休息\",\n \"什么\",\n \"人家\",\n \"亲戚\",\n \"亲家\",\n \"交情\",\n \"云彩\",\n \"事情\",\n \"买卖\",\n \"主意\",\n \"丫头\",\n \"丧气\",\n \"两口\",\n \"东西\",\n \"东家\",\n \"世故\",\n \"不由\",\n \"不在\",\n \"下水\",\n \"下巴\",\n \"上头\",\n \"上司\",\n \"丈夫\",\n \"丈人\",\n \"一辈\",\n \"那个\",\n \"菩萨\",\n \"父亲\",\n \"母亲\",\n \"咕噜\",\n \"邋遢\",\n \"费用\",\n \"冤家\",\n \"甜头\",\n \"介绍\",\n \"荒唐\",\n \"大人\",\n \"泥鳅\",\n \"幸福\",\n \"熟悉\",\n \"计划\",\n \"扑腾\",\n \"蜡烛\",\n \"姥爷\",\n \"照顾\",\n \"喉咙\",\n \"吉他\",\n \"弄堂\",\n \"蚂蚱\",\n \"凤凰\",\n \"拖沓\",\n \"寒碜\",\n \"糟蹋\",\n \"倒腾\",\n \"报复\",\n \"逻辑\",\n \"盘缠\",\n \"喽啰\",\n \"牢骚\",\n \"咖喱\",\n \"扫把\",\n \"惦记\",\n }\n self.must_not_neural_tone_words = {\n \"男子\",\n \"女子\",\n \"分子\",\n \"原子\",\n \"量子\",\n \"莲子\",\n \"石子\",\n \"瓜子\",\n \"电子\",\n \"人人\",\n \"虎虎\",\n }\n self.punc = \":,;。?!“”‘’':,;.?!\"\n\n # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041\n # e.g.\n # word: \"家里\"\n # pos: \"s\"\n # finals: ['ia1', 'i3']\n def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:\n # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺\n for j, item in enumerate(word):\n if (\n j - 1 >= 0\n and item == word[j - 1]\n and pos[0] in {\"n\", \"v\", \"a\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[j] = finals[j][:-1] + \"5\"\n ge_idx = word.find(\"个\")\n if len(word) >= 1 and word[-1] in \"吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶\":\n finals[-1] = finals[-1][:-1] + \"5\"\n elif len(word) >= 1 and word[-1] in \"的地得\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 走了, 看着, 去过\n # elif len(word) == 1 and word in \"了着过\" and pos in {\"ul\", \"uz\", \"ug\"}:\n # finals[-1] = finals[-1][:-1] + \"5\"\n elif (\n len(word) > 1\n and word[-1] in \"们子\"\n and pos in {\"r\", \"n\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 桌上, 地下, 家里\n elif len(word) > 1 and word[-1] in \"上下里\" and pos in {\"s\", \"l\", \"f\"}:\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 上来, 下去\n elif len(word) > 1 and word[-1] in \"来去\" and word[-2] in \"上下进出回过起开\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # 个做量词\n elif (\n ge_idx >= 1\n and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in \"几有两半多各整每做是\")\n ) or word == \"个\":\n finals[ge_idx] = finals[ge_idx][:-1] + \"5\"\n else:\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n\n word_list = self._split_word(word)\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n for i, word in enumerate(word_list):\n # conventional neural in Chinese\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals_list[i][-1] = finals_list[i][-1][:-1] + \"5\"\n finals = sum(finals_list, [])\n return finals\n\n def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # e.g. 看不懂\n if len(word) == 3 and word[1] == \"不\":\n finals[1] = finals[1][:-1] + \"5\"\n else:\n for i, char in enumerate(word):\n # \"不\" before tone4 should be bu2, e.g. 不怕\n if char == \"不\" and i + 1 < len(word) and finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n return finals\n\n def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # \"一\" in number sequences, e.g. 
一零零, 二一零\n if word.find(\"一\") != -1 and all(\n [item.isnumeric() for item in word if item != \"一\"]\n ):\n return finals\n # \"一\" between reduplication words should be yi5, e.g. 看一看\n elif len(word) == 3 and word[1] == \"一\" and word[0] == word[-1]:\n finals[1] = finals[1][:-1] + \"5\"\n # when \"一\" is ordinal word, it should be yi1\n elif word.startswith(\"第一\"):\n finals[1] = finals[1][:-1] + \"1\"\n else:\n for i, char in enumerate(word):\n if char == \"一\" and i + 1 < len(word):\n # \"一\" before tone4 should be yi2, e.g. 一段\n if finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n # \"一\" before non-tone4 should be yi4, e.g. 一天\n else:\n # \"一\" 后面如果是标点,还读一声\n if word[i + 1] not in self.punc:\n finals[i] = finals[i][:-1] + \"4\"\n return finals\n\n def _split_word(self, word: str) -> List[str]:\n word_list = jieba.cut_for_search(word)\n word_list = sorted(word_list, key=lambda i: len(i), reverse=False)\n first_subword = word_list[0]\n first_begin_idx = word.find(first_subword)\n if first_begin_idx == 0:\n second_subword = word[len(first_subword) :]\n new_word_list = [first_subword, second_subword]\n else:\n second_subword = word[: -len(first_subword)]\n new_word_list = [second_subword, first_subword]\n return new_word_list\n\n def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:\n if len(word) == 2 and self._all_tone_three(finals):\n finals[0] = finals[0][:-1] + \"2\"\n elif len(word) == 3:\n word_list = self._split_word(word)\n if self._all_tone_three(finals):\n # disyllabic + monosyllabic, e.g. 蒙古/包\n if len(word_list[0]) == 2:\n finals[0] = finals[0][:-1] + \"2\"\n finals[1] = finals[1][:-1] + \"2\"\n # monosyllabic + disyllabic, e.g. 纸/老虎\n elif len(word_list[0]) == 1:\n finals[1] = finals[1][:-1] + \"2\"\n else:\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n if len(finals_list) == 2:\n for i, sub in enumerate(finals_list):\n # e.g. 所有/人\n if self._all_tone_three(sub) and len(sub) == 2:\n finals_list[i][0] = finals_list[i][0][:-1] + \"2\"\n # e.g. 好/喜欢\n elif (\n i == 1\n and not self._all_tone_three(sub)\n and finals_list[i][0][-1] == \"3\"\n and finals_list[0][-1][-1] == \"3\"\n ):\n finals_list[0][-1] = finals_list[0][-1][:-1] + \"2\"\n finals = sum(finals_list, [])\n # split idiom into two words who's length is 2\n elif len(word) == 4:\n finals_list = [finals[:2], finals[2:]]\n finals = []\n for sub in finals_list:\n if self._all_tone_three(sub):\n sub[0] = sub[0][:-1] + \"2\"\n finals += sub\n\n return finals\n\n def _all_tone_three(self, finals: List[str]) -> bool:\n return all(x[-1] == \"3\" for x in finals)\n\n # merge \"不\" and the word behind it\n # if don't merge, \"不\" sometimes appears alone according to jieba, which may occur sandhi error\n def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n last_word = \"\"\n for word, pos in seg:\n if last_word == \"不\":\n word = last_word + word\n if word != \"不\":\n new_seg.append((word, pos))\n last_word = word[:]\n if last_word == \"不\":\n new_seg.append((last_word, \"d\"))\n last_word = \"\"\n return new_seg\n\n # function 1: merge \"一\" and reduplication words in it's left and right, e.g. 
\"听\",\"一\",\"听\" ->\"听一听\"\n # function 2: merge single \"一\" and the word behind it\n # if don't merge, \"一\" sometimes appears alone according to jieba, which may occur sandhi error\n # e.g.\n # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]\n # output seg: [['听一听', 'v']]\n def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n # function 1\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and word == \"一\"\n and i + 1 < len(seg)\n and seg[i - 1][0] == seg[i + 1][0]\n and seg[i - 1][1] == \"v\"\n ):\n new_seg[i - 1][0] = new_seg[i - 1][0] + \"一\" + new_seg[i - 1][0]\n else:\n if (\n i - 2 >= 0\n and seg[i - 1][0] == \"一\"\n and seg[i - 2][0] == word\n and pos == \"v\"\n ):\n continue\n else:\n new_seg.append([word, pos])\n seg = new_seg\n new_seg = []\n # function 2\n for i, (word, pos) in enumerate(seg):\n if new_seg and new_seg[-1][0] == \"一\":\n new_seg[-1][0] = new_seg[-1][0] + word\n else:\n new_seg.append([word, pos])\n return new_seg\n\n # the first and the second words are all_tone_three\n def _merge_continuous_three_tones(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and self._all_tone_three(sub_finals_list[i - 1])\n and self._all_tone_three(sub_finals_list[i])\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n\n return new_seg\n\n def _is_reduplication(self, word: str) -> bool:\n return len(word) == 2 and word[0] == word[1]\n\n # the last char of first word and the first char of second word is tone_three\n def _merge_continuous_three_tones_2(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and sub_finals_list[i - 1][-1][-1] == \"3\"\n and sub_finals_list[i][0][-1] == \"3\"\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if i - 1 >= 0 and word == \"儿\" and seg[i - 1][0] != \"#\":\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if new_seg and word == new_seg[-1][0]:\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n 
return new_seg\n\n def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n seg = self._merge_bu(seg)\n try:\n seg = self._merge_yi(seg)\n except:\n print(\"_merge_yi failed\")\n seg = self._merge_reduplication(seg)\n seg = self._merge_continuous_three_tones(seg)\n seg = self._merge_continuous_three_tones_2(seg)\n seg = self._merge_er(seg)\n return seg\n\n def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:\n finals = self._bu_sandhi(word, finals)\n finals = self._yi_sandhi(word, finals)\n finals = self._neural_sandhi(word, pos, finals)\n finals = self._three_sandhi(word, finals)\n return finals"
}
] | import os
import re
import cn2an
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from .symbols import punctuation
from .tone_sandhi import ToneSandhi
from text import chinese_bert
from text.chinese_bert import get_bert_feature | 7,673 |
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
tone_modifier = ToneSandhi()
def replace_punctuation(text):
text = text.replace("嗯", "恩").replace("呣", "母")
pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
replaced_text = re.sub(
|
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
tone_modifier = ToneSandhi()
def replace_punctuation(text):
text = text.replace("嗯", "恩").replace("呣", "母")
pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
replaced_text = re.sub( | r"[^\u4e00-\u9fa5" + "".join(punctuation) + r"]+", "", replaced_text | 0 | 2023-12-27 03:09:11+00:00 | 12k |
chinhsuanwu/ifusion-threestudio | extern/ldm_zero123/models/diffusion/ddpm.py | [
{
"identifier": "AutoencoderKL",
"path": "extern/ldm_zero123/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return 
self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x"
},
{
"identifier": "IdentityFirstStage",
"path": "extern/ldm_zero123/models/autoencoder.py",
"snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "VQModelInterface",
"path": "extern/ldm_zero123/models/autoencoder.py",
"snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec"
},
{
"identifier": "DDIMSampler",
"path": "extern/ldm_zero123/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec"
},
{
"identifier": "CrossAttention",
"path": "extern/ldm_zero123/modules/attention.py",
"snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)"
},
{
"identifier": "extract_into_tensor",
"path": "extern/ldm_zero123/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "make_beta_schedule",
"path": "extern/ldm_zero123/modules/diffusionmodules/util.py",
"snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "noise_like",
"path": "extern/ldm_zero123/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "extern/ldm_zero123/modules/distributions/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean"
},
{
"identifier": "normal_kl",
"path": "extern/ldm_zero123/modules/distributions/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "LitEma",
"path": "extern/ldm_zero123/modules/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "count_params",
"path": "extern/ldm_zero123/util.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "default",
"path": "extern/ldm_zero123/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "exists",
"path": "extern/ldm_zero123/util.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "instantiate_from_config",
"path": "extern/ldm_zero123/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "isimage",
"path": "extern/ldm_zero123/util.py",
"snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)"
},
{
"identifier": "ismap",
"path": "extern/ldm_zero123/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "log_txt_as_img",
"path": "extern/ldm_zero123/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "mean_flat",
"path": "extern/ldm_zero123/util.py",
"snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))"
}
] | import itertools
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from functools import partial
from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm
from extern.ldm_zero123.models.autoencoder import (
AutoencoderKL,
IdentityFirstStage,
VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
extract_into_tensor,
make_beta_schedule,
noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
DiagonalGaussianDistribution,
normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
count_params,
default,
exists,
instantiate_from_config,
isimage,
ismap,
log_txt_as_img,
mean_flat,
) | 10,504 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(
self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.0,
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.0,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.0,
make_it_fit=False,
ucg_training=None,
):
super().__init__()
assert parameterization in [
"eps",
"x0",
], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(
f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode"
)
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(
self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.0,
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.0,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.0,
make_it_fit=False,
ucg_training=None,
):
super().__init__()
assert parameterization in [
"eps",
"x0",
], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(
f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode"
)
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key) | count_params(self.model, verbose=True) | 11 | 2023-12-27 20:30:33+00:00 | 12k |
RaceCrewAI/gt-telem | gt_telem/turismo_client.py | [
{
"identifier": "PlayStationNotFoundError",
"path": "gt_telem/errors/playstation_errors.py",
"snippet": "class PlayStationNotFoundError(Exception):\n def __init__(self, message=\"Playstation not found on this network.\"):\n super().__init__(message)"
},
{
"identifier": "PlayStatonOnStandbyError",
"path": "gt_telem/errors/playstation_errors.py",
"snippet": "class PlayStatonOnStandbyError(Exception):\n def __init__(self, playstation_ip):\n message = f\"Playstation {'at '+playstation_ip+' ' if playstation_ip else ''}is on standby.\"\n super().__init__(message)"
},
{
"identifier": "SpanReader",
"path": "gt_telem/models/helpers.py",
"snippet": "class SpanReader:\n \"\"\"\n Utility class for reading binary data in a structured manner.\n \"\"\"\n\n def __init__(self, data, byte_order=\"little\"):\n \"\"\"\n Initialize the SpanReader.\n\n Parameters:\n - data: Binary data to read.\n - byte_order (str): Byte order for interpreting binary data, 'little' or 'big'.\n \"\"\"\n self.view = memoryview(data)\n self.byte_order = \"<\" if byte_order == \"little\" else \">\"\n self.position = 0\n\n def read_int32(self):\n \"\"\"\n Read a 32-bit signed integer from the binary data.\n\n Returns:\n int: The read integer value.\n \"\"\"\n format_string = f\"{self.byte_order}i\"\n value = struct.unpack_from(format_string, self.view, self.position)[0]\n self.position += 4\n return value\n\n def read_int16(self):\n \"\"\"\n Read a 16-bit signed integer from the binary data.\n\n Returns:\n int: The read integer value.\n \"\"\"\n format_string = f\"{self.byte_order}h\"\n value = struct.unpack_from(format_string, self.view, self.position)[0]\n self.position += 2\n return value\n\n def read_single(self):\n \"\"\"\n Read a 32-bit floating-point number from the binary data.\n\n Returns:\n float: The read floating-point value.\n \"\"\"\n format_string = f\"{self.byte_order}f\"\n value = struct.unpack_from(format_string, self.view, self.position)[0]\n self.position += 4\n return value\n\n def read_byte(self):\n \"\"\"\n Read a byte from the binary data.\n\n Returns:\n int: The read byte value.\n \"\"\"\n value = struct.unpack_from(\"B\", self.view, self.position)[0]\n self.position += 1\n return value\n\n def read_bytes(self, length):\n \"\"\"\n Read a specified number of bytes from the binary data.\n\n Parameters:\n - length (int): Number of bytes to read.\n\n Returns:\n bytes: The read bytes.\n \"\"\"\n value = self.view[self.position : self.position + length].tobytes()\n self.position += length\n return value"
},
{
"identifier": "Telemetry",
"path": "gt_telem/models/telemetry.py",
"snippet": "class Telemetry(TelemetryPacket):\n \"\"\"\n Telemetry data from Gran Turismo\n\n Attributes:\n - position_x: float - X-coordinate of the position.\n - position_y: float - Y-coordinate of the position.\n - position_z: float - Z-coordinate of the position.\n - velocity_x: float - X-component of velocity.\n - velocity_y: float - Y-component of velocity.\n - velocity_z: float - Z-component of velocity.\n - rotation_x: float - X-component of rotation.\n - rotation_y: float - Y-component of rotation.\n - rotation_z: float - Z-component of rotation.\n - orientation: float - Orientation.\n - ang_vel_x: float - X-component of angular velocity.\n - ang_vel_y: float - Y-component of angular velocity.\n - ang_vel_z: float - Z-component of angular velocity.\n - body_height: float - Height of the body.\n - engine_rpm: float - Engine RPM.\n - iv: float - IV, used for encryption.\n - fuel_level: float - Fuel level.\n - fuel_capacity: float - Fuel capacity.\n - speed_mps: float - Speed in meters per second.\n - boost_pressure: float - Boost pressure.\n - oil_pressure: float - Oil pressure.\n - water_temp: float - Water temperature.\n - oil_temp: float - Oil temperature.\n - tire_fl_temp: float - Front-left tire temperature.\n - tire_fr_temp: float - Front-right tire temperature.\n - tire_rl_temp: float - Rear-left tire temperature.\n - tire_rr_temp: float - Rear-right tire temperature.\n - packet_id: int - Packet ID.\n - current_lap: int - Current lap.\n - total_laps: int - Total laps.\n - best_lap_time_ms: int - Best lap time in milliseconds.\n - last_lap_time_ms: int - Last lap time in milliseconds.\n - time_of_day_ms: int - Time of day in milliseconds.\n - race_start_pos: int - Race start position.\n - total_cars: int - Total number of cars.\n - min_alert_rpm: int - Minimum alert RPM.\n - max_alert_rpm: int - Maximum alert RPM.\n - calc_max_speed: int - Calculated maximum speed.\n - flags: int - byte that contains current/suggested gear.\n - bits: int - Collection of booleans - see properties.\n - throttle: int - Throttle.\n - brake: int - Brake.\n - empty: int - Unused.\n - road_plane_x: float - X-coordinate of the road plane.\n - road_plane_y: float - Y-coordinate of the road plane.\n - road_plane_z: float - Z-coordinate of the road plane.\n - road_plane_dist: float - Distance of the road plane. 
Not sure what this is.\n - wheel_fl_rps: float - Front-left wheel revolutions per second.\n - wheel_fr_rps: float - Front-right wheel revolutions per second.\n - wheel_rl_rps: float - Rear-left wheel revolutions per second.\n - wheel_rr_rps: float - Rear-right wheel revolutions per second.\n - tire_fl_radius: float - Front-left tire radius.\n - tire_fr_radius: float - Front-right tire radius.\n - tire_rl_radius: float - Rear-left tire radius.\n - tire_rr_radius: float - Rear-right tire radius.\n - tire_fl_sus_height: float - Front-left tire suspension height.\n - tire_fr_sus_height: float - Front-right tire suspension height.\n - tire_rl_sus_height: float - Rear-left tire suspension height.\n - tire_rr_sus_height: float - Rear-right tire suspension height.\n - unused1: int - Unused variable 1.\n - unused2: int - Unused variable 2.\n - unused3: int - Unused variable 3.\n - unused4: int - Unused variable 4.\n - unused5: int - Unused variable 5.\n - unused6: int - Unused variable 6.\n - unused7: int - Unused variable 7.\n - unused8: int - Unused variable 8.\n - clutch_pedal: float - Clutch pedal position.\n - clutch_engagement: float - Clutch engagement.\n - trans_rpm: float - Transmission RPM.\n - trans_top_speed: float - Transmission top speed.\n - gear1: float - Gear 1.\n - gear2: float - Gear 2.\n - gear3: float - Gear 3.\n - gear4: float - Gear 4.\n - gear5: float - Gear 5.\n - gear6: float - Gear 6.\n - gear7: float - Gear 7.\n - gear8: float - Gear 8.\n - car_code: int - Car code - on vehicles with more than 8 gears, this is corrupted.\n\n Properties:\n - position: Get the position as a Vector3D.\n - velocity: Get the velocity as a Vector3D.\n - rotation: Get the rotation as a Vector3D.\n - angular_velocity: Get the angular velocity as a Vector3D.\n - road_plane: Get the road plane coordinates as a Vector3D.\n - tire_temp: Get tire temperatures as a WheelMetric.\n - wheel_rps: Get wheel revolutions per second as a WheelMetric.\n - tire_radius: Get tire radii as a WheelMetric.\n - suspension_height: Get suspension heights as a WheelMetric.\n - current_gear: Get the current gear.\n - suggested_gear: Get the suggested gear.\n - speed_kph: Get the speed in kilometers per hour.\n - speed_mph: Get the speed in miles per hour.\n - cars_on_track: Check if there are cars on the track.\n - is_paused: Check if the simulation is paused.\n - is_loading: Check if the simulation is loading.\n - in_gear: Check if the vehicle is in gear.\n - has_turbo: Check if the vehicle has a turbo.\n - rev_limit: Check if the vehicle is at the rev limit.\n - hand_brake_active: Check if the hand brake is active.\n - lights_active: Check if the lights are active.\n - high_beams: Check if the high beams are active.\n - low_beams: Check if the low beams are active.\n - asm_active: Check if the ASM (Active Stability Management) is active.\n - tcs_active: Check if the TCS (Traction Control System) is active.\n - unknown_bool_1: Purpose unknown.\n - unknown_bool_2: Purpose unknown.\n - unknown_bool_3: Purpose unknown.\n - unknown_bool_4: Purpose unknown.\n - best_lap_time: Get the formatted best lap time.\n - last_lap_time: Get the formatted last lap time.\n - time_of_day: Get the formatted time of day.\n\n Methods\n - as_dict: Get the state of the object in a dictionary format.\n \"\"\"\n\n def __post_init__(self):\n self.time = datetime.now()\n\n @property\n def position(self) -> Vector3D:\n \"\"\"\n Get the position as a Vector3D.\n \"\"\"\n return Vector3D(self.position_x, self.position_y, self.position_z)\n\n 
@property\n def velocity(self) -> Vector3D:\n \"\"\"\n Get the velocity as a Vector3D.\n \"\"\"\n return Vector3D(self.velocity_x, self.velocity_y, self.velocity_z)\n\n @property\n def rotation(self) -> Vector3D:\n \"\"\"\n Get the rotation as a Vector3D.\n \"\"\"\n return Vector3D(self.rotation_x, self.rotation_y, self.rotation_z)\n\n @property\n def angular_velocity(self) -> Vector3D:\n \"\"\"\n Get the angular velocity as a Vector3D.\n \"\"\"\n return Vector3D(self.ang_vel_x, self.ang_vel_y, self.ang_vel_z)\n\n @property\n def road_plane(self) -> Vector3D:\n \"\"\"\n Get the road plane coordinates as a Vector3D.\n \"\"\"\n return Vector3D(self.road_plane_x, self.road_plane_y, self.road_plane_z)\n\n @property\n def tire_temp(self) -> WheelMetric:\n \"\"\"\n Get tire temperatures as a WheelMetric.\n \"\"\"\n return WheelMetric(\n self.tire_fl_temp, self.tire_fr_temp, self.tire_rl_temp, self.tire_rr_temp\n )\n\n @property\n def wheel_rps(self) -> WheelMetric:\n \"\"\"\n Get wheel revolutions per second as a WheelMetric.\n \"\"\"\n return WheelMetric(\n self.wheel_fl_rps, self.wheel_fr_rps, self.wheel_rl_rps, self.wheel_rr_rps\n )\n\n @property\n def tire_radius(self) -> WheelMetric:\n \"\"\"\n Get tire radii as a WheelMetric.\n \"\"\"\n return WheelMetric(\n self.tire_fl_radius,\n self.tire_fr_radius,\n self.tire_rl_radius,\n self.tire_rr_radius,\n )\n\n @property\n def suspension_height(self) -> WheelMetric:\n \"\"\"\n Get suspension heights as a WheelMetric.\n \"\"\"\n return WheelMetric(\n self.tire_fl_sus_height,\n self.tire_fr_sus_height,\n self.tire_rl_sus_height,\n self.tire_rr_sus_height,\n )\n\n @property\n def current_gear(self) -> int:\n \"\"\"\n Get the current gear.\n \"\"\"\n return self.bits & 0b1111\n\n @property\n def suggested_gear(self) -> int:\n \"\"\"\n Get the suggested gear.\n \"\"\"\n return self.bits >> 4\n\n @property\n def speed_kph(self) -> float:\n \"\"\"\n Get the speed in kilometers per hour.\n \"\"\"\n return self.speed_mps * 3.6\n\n @property\n def speed_mph(self) -> float:\n \"\"\"\n Get the speed in miles per hour.\n \"\"\"\n return self.speed_mps * 2.23694\n\n @property\n def cars_on_track(self) -> bool:\n \"\"\"\n Check if there are cars on the track.\n \"\"\"\n return bool(1<<0 & self.flags)\n\n @property\n def is_paused(self) -> bool:\n \"\"\"\n Check if the simulation is paused.\n \"\"\"\n return bool(1<<1 & self.flags)\n\n @property\n def is_loading(self) -> bool:\n \"\"\"\n Check if the simulation is loading.\n \"\"\"\n return bool(1<<2 & self.flags)\n\n @property\n def in_gear(self) -> bool:\n \"\"\"\n Check if the vehicle is in gear.\n \"\"\"\n return bool(1<<3 & self.flags)\n\n @property\n def has_turbo(self) -> bool:\n \"\"\"\n Check if the vehicle has a turbo.\n \"\"\"\n return bool(1<<4 & self.flags)\n\n @property\n def rev_limit(self) -> bool:\n \"\"\"\n Check if the vehicle is at the rev limit.\n \"\"\"\n return bool(1<<5 & self.flags)\n\n @property\n def hand_brake_active(self) -> bool:\n \"\"\"\n Check if the hand brake is active.\n \"\"\"\n return bool(1<<6 & self.flags)\n\n @property\n def lights_active(self) -> bool:\n \"\"\"\n Check if the lights are active.\n \"\"\"\n return bool(1<<7 & self.flags)\n\n @property\n def high_beams(self) -> bool:\n \"\"\"\n Check if the high beams are active.\n \"\"\"\n return bool(1<<8 & self.flags)\n\n @property\n def low_beams(self) -> bool:\n \"\"\"\n Check if the low beams are active.\n \"\"\"\n return bool(1<<9 & self.flags)\n\n @property\n def asm_active(self) -> bool:\n \"\"\"\n Check if the ASM 
(Active Stability Management) is active.\n \"\"\"\n return bool(1<<10 & self.flags)\n\n @property\n def tcs_active(self) -> bool:\n \"\"\"\n Check if the TCS (Traction Control System) is active.\n \"\"\"\n return bool(1<<11 & self.flags)\n\n @property\n def unknown_bool_1(self) -> bool:\n \"\"\"\n Get the value of an unknown boolean flag.\n \"\"\"\n return bool(1<<12 & self.flags)\n\n @property\n def unknown_bool_2(self) -> bool:\n \"\"\"\n Not sure\n \"\"\"\n return bool(1<<13 & self.flags)\n\n @property\n def unknown_bool_3(self) -> bool:\n \"\"\"\n Get the value of another unknown boolean flag.\n \"\"\"\n return bool(1<<14 & self.flags)\n\n @property\n def unknown_bool_4(self) -> bool:\n \"\"\"\n Get the value of another unknown boolean flag.\n \"\"\"\n return bool(1<<15 & self.flags)\n\n @property\n def best_lap_time(self) -> str:\n \"\"\"\n Get the formatted best lap time.\n \"\"\"\n if self.best_lap_time_ms == -1:\n return None\n return format_time(self.best_lap_time_ms)\n\n @property\n def last_lap_time(self) -> str:\n \"\"\"\n Get the formatted last lap time.\n \"\"\"\n if self.last_lap_time_ms == -1:\n return None\n return format_time(self.last_lap_time_ms)\n\n @property\n def time_of_day(self) -> str:\n \"\"\"\n Get the formatted time of day.\n \"\"\"\n if self.time_of_day_ms == -1:\n return None\n return format_time_of_day(self.time_of_day_ms)\n\n @property\n def as_dict(self):\n \"\"\"\n Returns a dictionary containing the state of the object.\n \"\"\"\n remove_keys = [\n x\n for x in self.__dict__.keys()\n if any(\n ignore in x\n for ignore in [\n \"_x\",\n \"_y\",\n \"_z\",\n \"flags\",\n \"bits\",\n \"empty\",\n \"unused\",\n \"_fl\",\n \"_fr\",\n \"_rl\",\n \"_rr\",\n ]\n )\n ]\n\n added = {\n \"position\": self.position,\n \"velocity\": self.velocity,\n \"rotation\": self.rotation,\n \"angular_velocity\": self.angular_velocity,\n \"road_plane\": self.road_plane,\n \"tire_temp\": self.tire_temp,\n \"wheel_rps\": self.wheel_rps,\n \"tire_radius\": self.tire_radius,\n \"suspension_height\": self.suspension_height,\n \"current_gear\": self.current_gear,\n \"suggested_gear\": self.suggested_gear,\n \"speed_kph\": self.speed_kph,\n \"speed_mph\": self.speed_mph,\n \"cars_on_track\": self.cars_on_track,\n \"is_paused\": self.is_paused,\n \"is_loading\": self.is_loading,\n \"in_gear\": self.in_gear,\n \"has_turbo\": self.has_turbo,\n \"rev_limit\": self.rev_limit,\n \"hand_brake_active\": self.hand_brake_active,\n \"lights_active\": self.lights_active,\n \"high_beams\": self.high_beams,\n \"low_beams\": self.low_beams,\n \"asm_active\": self.asm_active,\n \"tcs_active\": self.tcs_active,\n \"unknown_bool_1\": self.unknown_bool_1,\n \"unknown_bool_2\": self.unknown_bool_2,\n \"unknown_bool_3\": self.unknown_bool_3,\n \"unknown_bool_4\": self.unknown_bool_4,\n \"best_lap_time\": self.best_lap_time,\n \"last_lap_time\": self.last_lap_time,\n \"time_of_day\": self.time_of_day,\n }\n\n result = dict(self.__dict__, **added)\n for remove_key in remove_keys:\n result.pop(remove_key, None)\n\n return result\n\n @staticmethod\n def from_dict(d):\n \"\"\"\n Get telemetry instance from the as_dict property\n Useful for replays\n \"\"\"\n\n # pop the vector3s\n for vec3 in [\"position\", \"velocity\", \"rotation\", \"angular_velocity\", \"road_plane\"]:\n prop = d.pop(vec3)\n if vec3 == \"angular_velocity\":\n vec3 = \"ang_vel\"\n d[f\"{vec3}_x\"] = prop[0]\n d[f\"{vec3}_y\"] = prop[1]\n d[f\"{vec3}_z\"] = prop[2]\n # pop the corners\n for whmet, attr in {\n \"tire_temp\": \"tire_{0}_temp\",\n 
\"wheel_rps\": \"wheel_{0}_rps\",\n \"tire_radius\": \"tire_{0}_radius\",\n \"suspension_height\": \"tire_{0}_sus_height\"\n }.items():\n prop = d.pop(whmet)\n for i, k in {\n 0: \"fl\",\n 1: \"fr\",\n 2: \"rl\",\n 3: \"rr\"\n }.items():\n d[attr.format(k)] = prop[i]\n # rebuild the bits attr\n sg = d.pop(\"suggested_gear\") & 0xF\n cg = d.pop(\"current_gear\") & 0xF\n d[\"bits\"] = (sg << 4) | cg\n\n # just remove these:\n for prop in [\"speed_kph\", \"speed_mph\", \"best_lap_time\", \"last_lap_time\", \"time_of_day\"]:\n d.pop(prop)\n\n # Add back ones removed:\n d[\"empty\"] = 0\n for i in range(8):\n d[f\"unused{i+1}\"] = 0\n\n # rebuild flags\n d[\"flags\"] = (\n (1<<0 if d.pop(\"cars_on_track\") else 0) |\n (1<<1 if d.pop(\"is_paused\") else 0) |\n (1<<2 if d.pop(\"is_loading\") else 0) |\n (1<<3 if d.pop(\"in_gear\") else 0) |\n (1<<4 if d.pop(\"has_turbo\") else 0) |\n (1<<5 if d.pop(\"rev_limit\") else 0) |\n (1<<6 if d.pop(\"hand_brake_active\") else 0) |\n (1<<7 if d.pop(\"lights_active\") else 0) |\n (1<<8 if d.pop(\"high_beams\") else 0) |\n (1<<9 if d.pop(\"low_beams\") else 0) |\n (1<<10 if d.pop(\"asm_active\") else 0) |\n (1<<11 if d.pop(\"tcs_active\") else 0) |\n (1<<12 if d.pop(\"unknown_bool_1\", False) else 0) |\n (1<<13 if d.pop(\"clutch_out\", False) else 0) |\n (1<<13 if d.pop(\"unknown_bool_2\", False) else 0) |\n (1<<14 if d.pop(\"unknown_bool_3\", False) else 0) |\n (1<<15 if d.pop(\"unknown_bool_4\", False) else 0)\n )\n\n return Telemetry(**d)"
},
{
"identifier": "PDEncyption",
"path": "gt_telem/net/crypto.py",
"snippet": "class PDEncyption:\n \"\"\"\n PDEncyption class provides methods for decrypting ciphertext using Salsa20 stream cipher.\n Credit to https://github.com/Nenkai/PDTools\n \"\"\"\n\n _DEFAULT_KEY = b\"Simulator Interface Packet ver 0.0\"\n _GT7_KEY = b\"Simulator Interface Packet GT7 ver 0.0\"\n _IV_MASK: int = 0xDEADBEAF\n\n def __init__(self, is_gt7):\n self.is_gt7 = is_gt7\n\n def decrypt(self, ciphertext: bytes) -> bytes:\n \"\"\"\n Decrypts the provided ciphertext using Salsa20 stream cipher.\n\n Parameters:\n - ciphertext (bytes): The encrypted data to be decrypted.\n - is_gt7 (bool): Flag indicating whether to use the GT7 key. Default is True.\n\n Returns:\n bytes: The decrypted plaintext.\n \"\"\"\n seed = struct.unpack(\"<I\", ciphertext[0x40:0x44])[0]\n iv = seed ^ self._IV_MASK\n iv = struct.pack(\"<II\", iv, seed)\n return Salsa20_xor(\n ciphertext, iv, self._GT7_KEY[:32] if self.is_gt7 else self._DEFAULT_KEY[:32]\n )"
},
{
"identifier": "get_ps_ip_type",
"path": "gt_telem/net/device_discover.py",
"snippet": "def get_ps_ip_type() -> tuple[str | None, str | None]:\n \"\"\"\n Discovers the PlayStation IP address and host type using device discovery protocol.\n\n Returns:\n Tuple[Optional[str], Optional[str]]: A tuple containing the PlayStation IP address and host type.\n \"\"\"\n skt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n skt.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n skt.settimeout(1)\n\n query = b\"SRCH * HTTP/1.1\\ndevice-discovery-protocol-version:00030010\"\n\n skt.sendto(query, (\"<broadcast>\", 9302))\n try:\n packet, addr = skt.recvfrom(1024)\n except socket.timeout:\n return None, None\n\n ps_type = _get_host_type(packet.decode(\"utf-8\"))\n host_ip = addr[0]\n\n return host_ip, ps_type"
}
] | import asyncio
import copy
import logging
import socket
import threading
from collections import deque
from time import sleep
from gt_telem.errors.playstation_errors import (PlayStationNotFoundError,
PlayStatonOnStandbyError)
from gt_telem.models.helpers import SpanReader
from gt_telem.models.telemetry import Telemetry
from gt_telem.net.crypto import PDEncyption
from gt_telem.net.device_discover import get_ps_ip_type | 7,953 | loop.run_forever()
except KeyboardInterrupt:
loop.stop()
self._cancellation_token.set()
finally:
# Clean up any resources here if needed
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
async def run_async(self, cancellation_token: asyncio.Event=None) -> None:
"""
Asynchronously start the telemetry client and run the event loop.
Parameters:
- cancellation_token (asyncio.Event): Set token to shut down threads and return from run()
"""
self._cancellation_token = cancellation_token or asyncio.Event()
loop = asyncio.get_event_loop()
heartbeat_task = loop.create_task(self._send_heartbeat())
listen_task = loop.create_task(self._listen(loop))
# Run the tasks in the event loop
try:
await asyncio.gather(heartbeat_task, listen_task)
except KeyboardInterrupt:
self._cancellation_token.set()
loop.stop()
finally:
# Clean up any resources here if needed
await loop.shutdown_asyncgens()
async def _send_heartbeat(self) -> None:
"""
Send heartbeat messages at regular intervals to keep the telemetry stream alive.
"""
logging.info("Starting telemetry heartbeat.")
msg: bytes = b"A"
while not self._cancellation_token.is_set():
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.sendto(msg, (self.ip_addr, self.RECEIVE_PORT))
udp_socket.close()
await asyncio.sleep(10)
async def _listen(self, loop: asyncio.AbstractEventLoop) -> None:
"""
Listen for incoming telemetry data.
Parameters:
- loop: The asyncio event loop.
"""
logging.info(f"Listening for data on {self.ip_addr}:{self.BIND_PORT}")
class MyDatagramProtocol(asyncio.DatagramProtocol):
def __init__(self, client):
self.client = client
def datagram_received(self, data, addr):
self.client._handle_data(data)
udp_socket, _ = await loop.create_datagram_endpoint(
lambda: MyDatagramProtocol(self),
local_addr=("0.0.0.0", self.BIND_PORT)
)
await self._cancellation_token.wait()
udp_socket.close()
async def _process_telemetry_callbacks(self):
"""
Process telemetry callbacks.
"""
self._processing_callbacks = True
while True:
try:
# Wait for the next telemetry update callback
telemetry_value = await self._telem_callback_queue.get()
# Call the user-provided callback
for cb, args in self._telem_update_callbacks.items():
if args:
await cb(telemetry_value, *args)
else:
await cb(telemetry_value)
# Optionally introduce a delay here if needed
await asyncio.sleep(1 / 60) # 60 Hz update rate
except asyncio.CancelledError:
# The task is cancelled when the event loop is stopped
break
except Exception as e:
# Handle exceptions during callback processing
logging.error(f"Error processing telemetry {cb}: {e}")
self._processing_callbacks = False
def _handle_data(self, data: bytes) -> None:
"""
Handle incoming telemetry data.
Parameters:
- data: Raw telemetry data.
"""
try:
message: bytes = self._crypto.decrypt(data)
except Exception as e:
logging.debug(f"Failed to decrypt. Error: {e}. Wrong system?")
return
# First 4 bytes are header and indicate which system this is
try:
header: str = message[:4].decode("ascii")
except Exception as e:
logging.debug(f"Not sure what this is \n{message[:4]}. Error: {e}")
return
message: bytes = message[4:]
if not header in ["0S7G", "G6S0"]:
# bad data
logging.debug(f"Not sure what this is \n{header}")
return
if header == "0S7G":
|
class TurismoClient:
RECEIVE_PORT = 33339
BIND_PORT = 33340
def __init__(self, is_gt7: bool=True, ps_ip: str=None):
"""
Initialize TurismoClient.
Parameters:
- is_gt7 (bool): Flag indicating whether it's Gran Turismo 7. Default is True.
- ps_ip (str): PlayStation IP address. If None, it will be discovered.
"""
self._cancellation_token = None
ip, ps = get_ps_ip_type()
ip = ip or ps_ip
if not ip:
raise PlayStationNotFoundError()
if ps and "STANDBY" in ps:
raise PlayStatonOnStandbyError(ip)
logging.info(f"Using the {ps} at {ip}")
self.ip_addr: str = ip
if is_gt7:
self.RECEIVE_PORT += 400
self.BIND_PORT += 400
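# GT7 shifts both ports by 400 (33339/33340 -> 33739/33740); non-GT7 targets keep the base ports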
self._crypto: PDEncyption = PDEncyption(is_gt7)
self._telem_lock: threading.Lock = threading.Lock()
# Thread for when run w/o wait:
self._loop_thread = threading.Thread(target=self._run_forever_threaded)
self._telem: Telemetry = None
self._telem_update_callbacks = {}
self._telem_callback_queue = asyncio.LifoQueue(maxsize=1)
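# LIFO queue with maxsize=1: callbacks only ever see the newest frame; stale frames are replaced in the setter below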
self._processing_callbacks = False
@property
def telemetry(self) -> Telemetry:
"""
Get a copy of the telemetry data.
Returns:
Telemetry: A copy of the telemetry data.
"""
if not self._telem:
return None
with self._telem_lock:
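# hand out a deep-copied snapshot taken under the lock, never the live object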
cpy: Telemetry = copy.deepcopy(self._telem)
return cpy
@telemetry.setter
def telemetry(self, value: Telemetry) -> None:
"""
Set the telemetry data and call any registered callbacks.
Parameters:
- value (Telemetry): Telemetry data to set.
"""
with self._telem_lock:
self._telem = value
try:
self._telem_callback_queue.put_nowait(value)
except asyncio.QueueFull:
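# an older frame is still queued: drop it so only the newest telemetry is delivered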
self._telem_callback_queue.get_nowait()
self._telem_callback_queue.put_nowait(value)
if not self._processing_callbacks:
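# lazily start the callback-processing task on the first telemetry frame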
asyncio.create_task(self._process_telemetry_callbacks())
def register_callback(self, callback, args=None):
"""
Register an awaitable callback to be invoked when new telemetry is received.
The telemetry object is sent as the first parameter, and additional
args can be passed if specified.
Callbacks are executed off the main thread, potentially compromising
state integrity (e.g., using `self.` within your callback won't work).
To work around this limitation, declare your callback as a @staticmethod,
pass the class instance (self) as an argument, and receive the context of
the class in your parameters (after telemetry, which is the first).
.. code-block:: python
def __init__(self, tc: TurismoClient):
tc.register_callback(MyClass.parse_telem, [self])
@staticmethod
async def parse_telem(t: Telemetry, context: MyClass):
self = context
Additionally, note that the game sends telemetry at the same frequency as
the frame rate (~60/s). If your callback takes too long to process and exit,
subsequent callbacks will not be invoked until it returns.
"""
self._telem_update_callbacks[callback] = args
def deregister_callback(self, callback):
"""
Deregister a callback.
Parameters:
- callback: Callback to deregister.
"""
self._telem_update_callbacks.pop(callback)
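# Illustrative usage sketch (comments only, not part of the class); `print_speed` is a
# hypothetical callback and a reachable PlayStation on the LAN is assumed:
#
#   async def print_speed(t: Telemetry):
#       print(t.speed_kph)
#
#   tc = TurismoClient()
#   tc.register_callback(print_speed)
#   tc.run()  # blocks; stop with Ctrl+C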
def start(self):
self._loop_thread.start()
def stop(self):
self._cancellation_token.set()
self._loop_thread.join()
def _run_forever_threaded(self, cancellation_token: asyncio.Event=None) -> None:
"""
Start the telemetry client and return immediately. Must provide cancellation token.
Parameters:
- cancellation_token (asyncio.Event): Set token to shut down threads and return from run()
"""
asyncio.run(self.run_async(cancellation_token))
def run(self, cancellation_token: asyncio.Event=None) -> None:
"""
Start the telemetry client and run the event loop. Blocking.
Parameters:
- cancellation_token (asyncio.Event): Set token to shut down threads and return from run()
"""
self._cancellation_token = cancellation_token or asyncio.Event()
loop = asyncio.get_event_loop()
heartbeat_task = loop.create_task(self._send_heartbeat())
listen_task = loop.create_task(self._listen(loop))
# Run the tasks in the event loop
try:
loop.run_forever()
except KeyboardInterrupt:
loop.stop()
self._cancellation_token.set()
finally:
# Clean up any resources here if needed
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
async def run_async(self, cancellation_token: asyncio.Event=None) -> None:
"""
Asynchronously start the telemetry client and run the event loop.
Parameters:
- cancellation_token (asyncio.Event): Set token to shut down threads and return from run()
"""
self._cancellation_token = cancellation_token or asyncio.Event()
loop = asyncio.get_event_loop()
heartbeat_task = loop.create_task(self._send_heartbeat())
listen_task = loop.create_task(self._listen(loop))
# Run the tasks in the event loop
try:
await asyncio.gather(heartbeat_task, listen_task)
except KeyboardInterrupt:
self._cancellation_token.set()
loop.stop()
finally:
# Clean up any resources here if needed
await loop.shutdown_asyncgens()
async def _send_heartbeat(self) -> None:
"""
Send heartbeat messages at regular intervals to keep the telemetry stream alive.
"""
logging.info("Starting telemetry heartbeat.")
msg: bytes = b"A"
while not self._cancellation_token.is_set():
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.sendto(msg, (self.ip_addr, self.RECEIVE_PORT))
udp_socket.close()
await asyncio.sleep(10)
async def _listen(self, loop: asyncio.AbstractEventLoop) -> None:
"""
Listen for incoming telemetry data.
Parameters:
- loop: The asyncio event loop.
"""
logging.info(f"Listening for data on {self.ip_addr}:{self.BIND_PORT}")
class MyDatagramProtocol(asyncio.DatagramProtocol):
def __init__(self, client):
self.client = client
def datagram_received(self, data, addr):
self.client._handle_data(data)
udp_socket, _ = await loop.create_datagram_endpoint(
lambda: MyDatagramProtocol(self),
local_addr=("0.0.0.0", self.BIND_PORT)
)
await self._cancellation_token.wait()
udp_socket.close()
async def _process_telemetry_callbacks(self):
"""
Process telemetry callbacks.
"""
self._processing_callbacks = True
while True:
try:
# Wait for the next telemetry update callback
telemetry_value = await self._telem_callback_queue.get()
# Call the user-provided callback
for cb, args in self._telem_update_callbacks.items():
if args:
await cb(telemetry_value, *args)
else:
await cb(telemetry_value)
# Optionally introduce a delay here if needed
await asyncio.sleep(1 / 60) # 60 Hz update rate
except asyncio.CancelledError:
# The task is cancelled when the event loop is stopped
break
except Exception as e:
# Handle exceptions during callback processing
logging.error(f"Error processing telemetry {cb}: {e}")
self._processing_callbacks = False
def _handle_data(self, data: bytes) -> None:
"""
Handle incoming telemetry data.
Parameters:
- data: Raw telemetry data.
"""
try:
message: bytes = self._crypto.decrypt(data)
except Exception as e:
logging.debug(f"Failed to decrypt. Error: {e}. Wrong system?")
return
# First 4 bytes are header and indicate which system this is
try:
header: str = message[:4].decode("ascii")
except Exception as e:
logging.debug(f"Not sure what this is \n{message[:4]}. Error: {e}")
return
message: bytes = message[4:]
if not header in ["0S7G", "G6S0"]:
# bad data
logging.debug(f"Not sure what this is \n{header}")
return
if header == "0S7G": | sr: SpanReader = SpanReader(message, "little") | 2 | 2023-12-23 03:37:54+00:00 | 12k |
gardenifi/server | app/raspi/services.py | [
{
"identifier": "DayValueException",
"path": "app/raspi/exceptions.py",
"snippet": "class DayValueException(Exception):\n \"\"\"Specific exception definition.\"\"\"\n\n def __init__(self, argument_name):\n self.argument_name = argument_name\n super().__init__(f\"Day is not correct: {argument_name}\")"
},
{
"identifier": "DAYS",
"path": "app/raspi/const.py",
"snippet": "DAYS = [\"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\", \"sun\"]"
},
{
"identifier": "PROGRAM",
"path": "app/raspi/const.py",
"snippet": "PROGRAM = \"program_\""
},
{
"identifier": "PROGRAM_EXT",
"path": "app/raspi/const.py",
"snippet": "PROGRAM_EXT = \".json\""
},
{
"identifier": "RPI_HW_ID",
"path": "app/raspi/const.py",
"snippet": "RPI_HW_ID = str(\n subprocess.check_output(\n \"cat /proc/cpuinfo | grep Serial | \\\n cut -d ' ' -f2\",\n shell=True,\n )\n .decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n)"
},
{
"identifier": "ARCH",
"path": "app/raspi/const.py",
"snippet": "ARCH = \"arm\""
},
{
"identifier": "MQTT_HOST",
"path": "app/raspi/const.py",
"snippet": "MQTT_HOST = load_env_variable(\"MQTT_HOST\", \"localhost\")"
},
{
"identifier": "MQTT_PORT",
"path": "app/raspi/const.py",
"snippet": "MQTT_PORT = load_env_variable(\"MQTT_PORT\", \"1883\")"
},
{
"identifier": "MQTT_USER",
"path": "app/raspi/const.py",
"snippet": "MQTT_USER = load_env_variable(\"MQTT_USER\", \"user\")"
},
{
"identifier": "MQTT_PASS",
"path": "app/raspi/const.py",
"snippet": "MQTT_PASS = load_env_variable(\"MQTT_PASS\", \"pass\")"
},
{
"identifier": "MAX_NUM_OF_BYTES_CHUNK",
"path": "app/raspi/const.py",
"snippet": "MAX_NUM_OF_BYTES_CHUNK = 512"
},
{
"identifier": "MAX_NUM_OF_BUFFER_TO_ADD",
"path": "app/raspi/const.py",
"snippet": "MAX_NUM_OF_BUFFER_TO_ADD = 5"
},
{
"identifier": "Helpers",
"path": "app/raspi/helpers.py",
"snippet": "class Helpers:\n \"\"\"\n The `Helpers` class provides various helper methods for performing tasks\n such as setting valves, getting system information, storing and loading\n objects to/from files, managing WiFi networks, and updating the `wpa_supplicant.conf` file.\n \"\"\"\n\n __instance = None\n __lock = threading.Lock()\n\n def __new__(cls):\n \"\"\"\n Create a new instance of the Helpers class using the singleton design pattern.\n\n Returns:\n An instance of the Helpers class.\n\n Example Usage:\n instance = Helpers()\n \"\"\"\n if cls.__instance is None:\n with cls.__lock:\n cls.__instance = super().__new__(cls) # pylint: disable=duplicate-code\n cls._toggle_statuses = {}\n cls._ap_array = []\n cls._is_connected_to_inet = False\n return cls.__instance\n\n @classmethod\n def destroy_instance(cls):\n \"\"\"\n Destroy the instance of the Helpers class.\n\n This method sets the instance of the Helpers class to None, effectively destroying the instance.\n\n Example Usage:\n ```python\n instance = Helpers() # Create an instance of the Helpers class\n Helpers.destroy_instance() # Destroy the instance\n print(instance) # Output: None\n ```\n\n Inputs:\n None\n\n Outputs:\n None\n \"\"\"\n cls.__instance = None\n cls._toggle_statuses = {}\n cls._ap_array = []\n cls._is_connected_to_inet = False\n\n @property\n def toggle_statuses(self):\n \"\"\"\n Getter method for the toggle_statuses property.\n\n Returns:\n dict: A dictionary containing toggle statuses.\n\n Example:\n Access toggle statuses using `instance.toggle_statuses`.\n \"\"\"\n return self._toggle_statuses\n\n @toggle_statuses.setter\n def toggle_statuses(self, value):\n \"\"\"\n Setter method for the toggle_statuses property.\n\n Args:\n value (dict): A dictionary containing toggle statuses to set.\n\n Example:\n Set toggle statuses using `instance.toggle_statuses = new_statuses`.\n \"\"\"\n self._toggle_statuses = value\n\n @property\n def ap_array(self):\n \"\"\"\n Getter method for the _ap_array property.\n\n Returns:\n An array of wifi networks\n\n Example:\n Access toggle statuses using `instance.ap_array`.\n \"\"\"\n return self._ap_array\n\n @ap_array.setter\n def ap_array(self, value):\n \"\"\"\n Setter method for the _ap_array property.\n\n Args:\n value (dict): An array containing the wifi networks to set.\n\n Example:\n Set toggle statuses using `instance.ap_array = new_ap_array`.\n \"\"\"\n self._ap_array = value\n\n def set_valves(self, valves):\n \"\"\"\n Set valve statuses in the toggle_statuses dictionary.\n\n Args:\n valves (str or dict): A string or dictionary representing valve statuses.\n\n Example:\n instance.set_valves('{\"valve1\": true, \"valve2\": false}')\n \"\"\"\n try:\n if isinstance(valves, str):\n valves = ast.literal_eval(valves)\n else:\n valves = ast.literal_eval(str(valves))\n self._toggle_statuses[\"valves\"] = valves\n except Exception as exception:\n logger.error(f\"Error in set_valves: {exception}\")\n raise\n\n def extract_local_ip(self):\n \"\"\"\n Extract the local IP address of the device.\n\n Returns:\n str: The local IP address.\n\n Example:\n local_ip = instance.extract_local_ip()\n \"\"\"\n tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n tcp_sock.connect((\"8.8.8.8\", 1))\n ip_address = tcp_sock.getsockname()[0]\n except Exception:\n ip_address = \"127.0.0.1\"\n finally:\n tcp_sock.close()\n return ip_address\n\n def get_uptime(self):\n \"\"\"\n Get the system uptime.\n\n Returns:\n str: The system uptime.\n\n Example:\n uptime = instance.get_uptime()\n 
\"\"\"\n try:\n result = subprocess.run([\"uptime\", \"-p\"], stdout=subprocess.PIPE, text=True, check=True)\n return result.stdout.replace(\"\\n\", \"\")\n except Exception as e:\n logger.error(f\"Error retrieving uptime: {e}\")\n return str(e)\n\n def get_git_commit_id(self):\n \"\"\"\n Get the Git commit ID of the current project.\n\n Returns:\n str: The Git commit ID.\n\n Example:\n commit_id = instance.get_git_commit_id()\n \"\"\"\n # Open the file in read mode ('r')\n try:\n with open(RPI_SERVER_GIT_COMMIT, encoding=\"utf-8\") as file:\n # Read the entire content of the file\n content = file.read().replace(\"\\n\", \"\")\n logger.debug(f\"File content: {content}\")\n return content\n except FileNotFoundError as e:\n logger.error(f\"The file '{RPI_SERVER_GIT_COMMIT}' does not exist.\")\n return str(e)\n except Exception as e:\n traceback.print_exc()\n logger.error(f\"Error retrieving git log: {e}\")\n return str(e)\n\n def store_object_to_file(self, filename, local_object):\n \"\"\"\n Store a local object to a file using pickle.\n\n Args:\n filename (str): The name of the file to store the object.\n local_object (object): The object to be stored.\n\n Example:\n instance.store_object_to_file('data.pkl', data)\n \"\"\"\n try:\n with open(filename, \"wb\") as obj_file:\n pickle.dump(local_object, obj_file)\n logger.info(f\"Stored local object file: {filename}: {local_object}\")\n obj_file.close()\n return local_object\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def store_toggle_statuses_to_file(self):\n \"\"\"\n Store toggle statuses to a file.\n\n Returns:\n dict: The toggle statuses being stored.\n\n Example:\n stored_statuses = instance.store_toggle_statuses_to_file()\n \"\"\"\n return self.store_object_to_file(STATUSES_FILE, self._toggle_statuses)\n\n def store_wifi_networks_to_file(self):\n \"\"\"\n Store WiFi networks to a file.\n\n Returns:\n list: The WiFi networks being stored.\n\n Example:\n stored_networks = instance.store_wifi_networks_to_file()\n \"\"\"\n return self.store_object_to_file(NETWORKS_FILE, self._ap_array)\n\n def load_object_from_file(self, filename):\n \"\"\"\n Load a local object from a file using pickle.\n\n Args:\n filename (str): The name of the file to load the object from.\n\n Returns:\n object: The loaded object.\n\n Example:\n loaded_object = instance.load_object_from_file('data.pkl')\n \"\"\"\n try:\n local_obj = {}\n with open(filename, \"rb\") as obj_file:\n local_obj = pickle.load(obj_file)\n logger.info(f\"Loaded local object file: {filename}: {local_obj}\")\n obj_file.close()\n return local_obj\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n self.store_object_to_file(filename, local_obj)\n return local_obj\n\n def load_toggle_statuses_from_file(self):\n \"\"\"\n Load toggle statuses from a file and update the instance's _toggle_statuses attribute.\n \"\"\"\n self._toggle_statuses = self.load_object_from_file(STATUSES_FILE)\n\n def load_wifi_networks_from_file(self):\n \"\"\"\n Load WiFi networks from a file and update the instance's _ap_array attribute.\n \"\"\"\n self._ap_array = self.load_object_from_file(NETWORKS_FILE)\n\n def get_timezone(self):\n \"\"\"\n Get the system timezone.\n\n Returns:\n str: The system timezone.\n\n Example:\n timezone = instance.get_timezone()\n \"\"\"\n return str(time.tzname[time.daylight])\n\n def check_empty_toggle(self, valve):\n \"\"\"\n Check if a toggle status is empty for a specific valve and set a default value if it is.\n\n Args:\n valve 
(str): The name of the valve.\n\n Example:\n instance.check_empty_toggle(\"out1\")\n \"\"\"\n if self._toggle_statuses.get(valve) is None:\n self._toggle_statuses[valve] = 0\n self._toggle_statuses[valve] = self.set_gpio_outputs(self._toggle_statuses[valve], valve)\n\n def get_toggle_statuses(self):\n \"\"\"\n Get and update toggle statuses, system information, and store them to a file.\n\n Returns:\n dict: The updated toggle statuses.\n\n Example:\n updated_statuses = instance.get_toggle_statuses()\n \"\"\"\n if \"valves\" not in self._toggle_statuses:\n self.set_valves([])\n\n self.check_empty_toggle(\"out1\")\n self.check_empty_toggle(\"out2\")\n self.check_empty_toggle(\"out3\")\n self.check_empty_toggle(\"out4\")\n\n self._toggle_statuses[\"server_time\"] = str(datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"))\n self._toggle_statuses[\"tz\"] = self.get_timezone()\n self._toggle_statuses[\"hw_id\"] = RPI_HW_ID\n\n logger.info(f\"Valves statuses:{self._toggle_statuses}\")\n self.store_toggle_statuses_to_file()\n\n return self._toggle_statuses\n\n def set_gpio_outputs(self, status, valve):\n \"\"\"\n Set GPIO outputs for a specified valve.\n\n Args:\n status (int): The status to be set (0 or 1).\n valve (str): The name of the valve.\n\n Returns:\n int: The modified status.\n\n Example:\n modified_status = instance.set_gpio_outputs(1, \"out1\")\n \"\"\"\n status = bool(status in (1, 2))\n logger.info(f\"Set Output of Valve: {valve}::{status}\")\n if ARCH == \"arm\":\n if valve == \"out2\":\n logger.info(f\"===========> Setting PIN 11 GPIO.output...{status}\")\n # RuntimeError: Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)\n GPIO.output(11, status)\n logger.info(f\"===========> PIN 11 Status GPIO.input: {GPIO.input(11)}\")\n return 1 if status is True else 0\n\n def toggle(self, status, valve):\n \"\"\"\n Toggle a valve, set GPIO outputs, update toggle statuses, and store them to a file.\n\n Args:\n status (int): The new status to be set (0 or 1).\n valve (str): The name of the valve.\n\n Returns:\n str: A confirmation message.\n\n Example:\n confirmation = instance.toggle(1, \"out1\")\n \"\"\"\n status = self.set_gpio_outputs(status, valve)\n self._toggle_statuses[valve] = status\n logger.info(f\"Modified valves statuses: {self._toggle_statuses}\")\n self.store_toggle_statuses_to_file()\n return \"OK\"\n\n @property\n def is_connected_to_inet(self):\n \"\"\"\n Get the current internet connection status.\n\n Returns:\n bool: True if connected, False otherwise.\n\n Example:\n connection_status = instance.is_connected_to_inet()\n \"\"\"\n return self._is_connected_to_inet\n\n @is_connected_to_inet.setter\n def is_connected_to_inet(self, value):\n \"\"\"\n Set the current internet connection status.\n\n Returns:\n None\n\n Example:\n instance.is_connected_to_inet = connection_status\n \"\"\"\n self._is_connected_to_inet = value\n\n def system_reboot(self):\n \"\"\"\n Reboot the system after a 2-second delay.\n \"\"\"\n logger.info(\"Rebooting in 2 seconds...\")\n time.sleep(2)\n try:\n subprocess.run([\"reboot\"], stdout=subprocess.PIPE, text=True, check=True)\n except Exception as e:\n logger.error(f\"Error rebooting: {e}\")\n\n def system_update(self):\n \"\"\"\n Update the system through git.\n \"\"\"\n logger.info(\"Git update code and restart...\")\n try:\n subprocess.run([\"/usr/bin/git\", \"pull\"], stdout=subprocess.PIPE, text=True, check=True)\n os.kill(os.getpid(), signal.SIGTERM)\n except Exception as e:\n logger.error(f\"Error updating git: 
{e}\")\n\n def checking_for_duplicate_ssids(self, ssid, ap_array):\n \"\"\"\n Check for duplicate SSIDs in the list of WiFi networks.\n\n Args:\n ssid (str): The SSID to check.\n ap_array (list): The list of WiFi networks.\n\n Returns:\n bool: True if a duplicate is found, False otherwise.\n\n Example:\n is_duplicate = instance.checking_for_duplicate_ssids(\"MyWiFi\", wifi_networks)\n \"\"\"\n for wifi in ap_array:\n if wifi[\"ssid\"] == ssid:\n return True\n return False\n\n def scan_rpi_wifi_networks(self, refresh=False):\n \"\"\"\n Scan for available WiFi networks and update the instance's _ap_array attribute.\n\n Args:\n refresh (bool): If True, force a refresh of the WiFi networks list.\n\n Returns:\n list: The updated list of WiFi networks.\n\n Example:\n wifi_networks = instance.scan_rpi_wifi_networks()\n \"\"\"\n self._ap_array = []\n index = 0\n if not os.path.exists(NETWORKS_FILE):\n refresh = True\n if refresh:\n if ARCH == \"arm\":\n with subprocess.Popen([\"iwlist\", \"scan\"], stdout=subprocess.PIPE) as iwlist_raw:\n ap_list, err = iwlist_raw.communicate()\n if err is not None:\n logger.error(f\"Popen error: {err}\")\n return self._ap_array\n logger.debug(f\"iwlist scan command output: {ap_list}\")\n for line in ap_list.decode(\"utf-8\").rsplit(\"\\n\"):\n logger.debug(f\"Line: {line}\")\n if \"ESSID\" in line:\n ap_ssid = line[27:-1]\n if ap_ssid != \"\" and not self.checking_for_duplicate_ssids(ap_ssid, self._ap_array):\n index += 1\n logger.info(f\"id = {index}, ssid = {ap_ssid}\")\n wifi_network = {\"id\": index, \"ssid\": str(ap_ssid)}\n self._ap_array.append(json.loads(json.dumps(wifi_network)))\n self.store_wifi_networks_to_file()\n else:\n self._ap_array = []\n else:\n self.load_wifi_networks_from_file()\n\n return self._ap_array\n\n def store_wpa_ssid_key(self, ssid, wifi_key):\n \"\"\"\n Store the WPA SSID and key, and update the WPA supplicant configuration.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n\n Returns:\n bool: True if the update is successful, False otherwise.\n\n Example:\n success = instance.store_wpa_ssid_key(\"MyWiFi\", \"MyPassword\")\n \"\"\"\n try:\n logger.info(f\"ssid: {ssid}, wifi_key: {wifi_key}\")\n return self.update_wpa_supplicant(ssid, wifi_key)\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def is_raspberry_pi_zero(self):\n \"\"\"\n Check whether we're hosted in an RPi Zero or not.\n \"\"\"\n try:\n with open(\"/proc/cpuinfo\", encoding=\"utf8\") as cpuinfo:\n for line in cpuinfo:\n if line.startswith(\"Model\"):\n model_info = line.strip().split(\":\")\n model_name = model_info[1].strip()\n return \"Raspberry Pi Zero\" in model_name\n return False\n except FileNotFoundError as fnfex:\n logger.error(f\"Error: {fnfex}\")\n return False\n\n def write_wpa_supplicant(self, ssid, wifi_key):\n \"\"\"\n Write the WPA supplicant configuration to a temporary file.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n \"\"\"\n with open(WPA_SUPL_CONF_TMP, \"w\", encoding=\"utf8\") as temp_conf_file:\n temp_conf_file.write(\"ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\\n\")\n temp_conf_file.write(\"update_config=1\\n\")\n temp_conf_file.write(\"\\n\")\n temp_conf_file.write(\"network={\\n\")\n temp_conf_file.write('\tssid=\"' + str(ssid) + '\"\\n')\n if wifi_key == \"\":\n temp_conf_file.write(\"\tkey_mgmt=NONE\\n\")\n else:\n temp_conf_file.write('\tpsk=\"' + str(wifi_key) + 
'\"\\n')\n temp_conf_file.write(\"}\\n\")\n temp_conf_file.close()\n\n def get_wireless_interface(self):\n \"\"\"\n Get the wireless interface name of the device.\n\n Returns:\n str: The wireless interface name.\n\n Example:\n interface_name = instance.get_wireless_interface()\n \"\"\"\n try:\n ifconfig_output = subprocess.check_output([\"ifconfig\"]).decode(\"utf-8\")\n wireless_interfaces = re.findall(r\"wlan[0-9]+\", ifconfig_output)\n if wireless_interfaces:\n return wireless_interfaces[0]\n except subprocess.CalledProcessError as ex:\n logger.error(f\"Error: {ex}\")\n raise\n return None\n\n def update_wpa_supplicant(self, ssid, wifi_key):\n \"\"\"\n Update the WPA supplicant configuration and check for internet connectivity.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n\n Returns:\n bool: True if connected to the internet after the update, False otherwise.\n\n Example:\n connected = instance.update_wpa_supplicant(\"MyWiFi\", \"MyPassword\")\n \"\"\"\n try:\n self._is_connected_to_inet = False\n if RUNNING_UNIT_TESTS and ssid == DUMMY_SSID and wifi_key == DUMMY_PASSKEY:\n return True\n # In case of Raspberry Pi Zero NetworkManager stucks. So let's go with the wap_supplicant\n # modification approach.\n if self.is_raspberry_pi_zero():\n self.write_wpa_supplicant(ssid, wifi_key)\n os.system(\n \"cp /etc/wpa_supplicant/wpa_supplicant.conf \\\n /etc/wpa_supplicant/wpa_supplicant.conf.bak\"\n )\n os.system(\"cp \" + WPA_SUPL_CONF_TMP + \" /etc/wpa_supplicant/wpa_supplicant.conf\")\n wpa_cli_cmd = \"sudo wpa_cli -i wlan0 reconfigure\"\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command {wpa_cli_cmd}:{output.decode('utf8')}\")\n else:\n wpa_cli_cmd = f\"sudo nmcli device wifi connect {ssid} password {wifi_key}\"\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command `{wpa_cli_cmd}:{output.decode('utf8')}`\")\n\n wireless_interface = self.get_wireless_interface()\n logger.info(f\"wireless_interface `{wireless_interface}`\")\n wpa_cli_cmd = f\"wpa_cli -i {wireless_interface} status | grep state | cut -d'=' -f2\"\n logger.info(f\"Command to run: `{wpa_cli_cmd}`\")\n retries = 0\n while retries < 30:\n retries = retries + 1\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command `{wpa_cli_cmd}`:{output.decode('utf8')}\")\n if str(output.decode(\"utf8\")) == \"COMPLETED\\n\":\n self._is_connected_to_inet = True\n else:\n time.sleep(2)\n\n logger.info(f\"Connected to internet: {self._is_connected_to_inet}\")\n return self._is_connected_to_inet\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def sleep_and_reboot_for_wpa(self):\n \"\"\"\n Sleep for a short period and then reboot the system.\n \"\"\"\n self.system_reboot()"
}
] | import json
from threading import Thread
from os import path, remove
from loguru import logger
from apscheduler.triggers.combining import OrTrigger
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.raspi.exceptions import DayValueException
from app.raspi.const import (
DAYS,
PROGRAM,
PROGRAM_EXT,
RPI_HW_ID,
ARCH,
MQTT_HOST,
MQTT_PORT,
MQTT_USER,
MQTT_PASS,
MAX_NUM_OF_BYTES_CHUNK,
MAX_NUM_OF_BUFFER_TO_ADD,
)
from app.raspi.helpers import Helpers | 7,326 | if adjusted_hour <= 0:
days_passed = -1
elif adjusted_hour >= 24:
days_passed = 1
else:
days_passed = 0
adjusted_hour = adjusted_hour % 24
return adjusted_hour, days_passed
def get_previous_day(self, current_day):
"""
Returns the name of the previous day based on the given current day.
Parameters:
- current_day (str): The name of the current day (e.g., 'mon').
Returns:
str: The name of the previous day.
"""
# Find the index of the current day
current_index = DAYS.index(current_day)
# Calculate the index of the previous day
previous_index = (current_index - 1) % len(DAYS)
# Get the name of the previous day
previous_day = DAYS[previous_index]
return previous_day
def get_next_day(self, current_day):
"""
Returns the name of the next day based on the given current day.
Parameters:
- current_day (str): The name of the current day (e.g., 'mon').
Returns:
str: The name of the next day.
"""
# Find the index of the current day
current_index = DAYS.index(current_day)
# Calculate the index of the next day
next_index = (current_index + 1) % len(DAYS)
# Get the name of the next day
next_day = DAYS[next_index]
return next_day
def get_start_day_hour(self, day, start_hour, tz_offset):
"""
Checks if the start day or hour should be adjusted based on the provided conditions.
Parameters:
- day (str): The name of the current day (e.g., 'mon').
- start_hour (int): The original start hour (0 to 23).
- tz_offset (int): The timezone offset in hours (-12 to +14).
Returns:
tuple: A tuple containing the adjusted day and start hour based on the provided conditions.
"""
logger.info(f"Checking whether start_day should change: {day}")
# Convert start_hour to UTC (e.g. start_hour=0, tz_offset=2, start_hour=22)
start_hour, days_passed = self.convert_to_utc(start_hour, tz_offset)
if days_passed == 1:
day = self.get_next_day(day)
elif days_passed == -1:
day = self.get_previous_day(day)
logger.info(f"new start_day: {day}")
logger.info(f"new start_hour: {start_hour}")
return day, start_hour
def get_stop_datetime(self, day, start_hour, start_min, period):
"""
Calculate the stop time for a program cycle.
Parameters:
- day (str): The day of the week.
- start_hour (int): The starting hour.
- start_min (int): The starting minute.
- period (int): The duration of the cycle in minutes.
Returns:
tuple: A tuple containing the stop day, stop hour, and stop minute.
"""
logger.debug(f"Converting to correct day, start, stop: {day}, {start_hour}, {start_min}, {period}")
stop_day_index = DAYS.index(day)
logger.debug(f"stop_day_index {stop_day_index}")
stop_min = (start_min + period) % 60
logger.debug(f"stop_min {stop_min}")
if stop_min < start_min:
# should go to the next hour
stop_hour = (start_hour + 1) % 24
# should go to the next day
if stop_hour < start_hour:
stop_day_index = (stop_day_index + 1) % 7
else:
stop_hour = start_hour
logger.debug(f"stop_hour {stop_hour}")
stop_day = DAYS[stop_day_index]
logger.debug(f"stop_day: {stop_day}")
return stop_day, stop_hour, stop_min
def store_program_cycles(self, json_data, store=False) -> None:
"""
Store program cycles and schedule them using the scheduler.
Parameters:
- json_data (dict): JSON data containing program information.
- store (bool, optional): Whether to store the program information. Default is False.
Returns:
None
"""
try:
triggers_to_start = []
triggers_to_stop = []
for day in json_data["days"].split(","):
if day not in DAYS:
| """MIT License
Copyright (c) 2023, Marios Karagiannopoulos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
**Attribution Requirement:**
When using or distributing the software, an attribution to Marios Karagiannopoulos must be included.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# pylint: disable=too-many-locals
class Services:
"""
The `Services` class provides various methods for managing and controlling
services related to a Raspberry Pi device, such as turning on/off valves,
storing and deleting program cycles, loading program cycles, discovering
WiFi networks, and saving WiFi network configurations.
"""
def __init__(self):
"""Constructor"""
self._scheduler = BackgroundScheduler()
self._scheduler_started = False
@property
def scheduler_started(self):
"""getter"""
return self._scheduler_started
@scheduler_started.setter
def scheduler_started(self, value):
"""setter"""
self._scheduler_started = value
@property
def scheduler(self):
"""getter"""
return self._scheduler
@scheduler.setter
def scheduler(self, value):
"""setter"""
self._scheduler = value
def turn_on_from_program(self, valve):
"""
Turn on a valve based on the program.
Parameters:
- valve (int): The valve number.
Returns:
None
"""
return Helpers().toggle(2, "out" + str(valve))
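# status 2 also maps to 'on' in Helpers.set_gpio_outputs (any value in (1, 2)); presumably it distinguishes a program-driven activation from a manual one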
def turn_off_from_program(self, valve):
"""
Turn off a valve based on the program.
Parameters:
- valve (int): The valve number.
Returns:
None
"""
return Helpers().toggle(0, "out" + str(valve))
def convert_to_utc(self, start_hour, tz_offset):
"""
Converts a given start hour in a specific time zone to Coordinated Universal Time (UTC).
Args:
start_hour (int): The starting hour in the local time zone.
tz_offset (int): The time zone offset in hours. Positive values for time zones ahead of UTC,
negative values for time zones behind UTC.
Returns:
Tuple[int, int]: A tuple containing the adjusted hour in UTC and the number of days passed.
The adjusted hour is in the range [0, 23], and the days_passed is -1, 0, or 1
indicating whether the adjusted hour falls before, within, or after the current day.
Example:
For a local start_hour of 10 and tz_offset of -5 (Eastern Standard Time),
convert_to_utc(10, -5) returns (15, 0), indicating that the adjusted UTC hour is 15 with no days passed.
Note:
The method assumes a 24-hour clock format.
"""
logger.info(f"Checking whether start_hour should change: {start_hour}, tz_offset: {tz_offset}")
# Calculate the adjusted hour
adjusted_hour = start_hour - tz_offset
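# e.g. start_hour=10 with tz_offset=-5 (UTC-5): 10 - (-5) = 15 -> 15:00 UTC, same day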
if adjusted_hour <= 0:
days_passed = -1
elif adjusted_hour >= 24:
days_passed = 1
else:
days_passed = 0
adjusted_hour = adjusted_hour % 24
return adjusted_hour, days_passed
def get_previous_day(self, current_day):
"""
Returns the name of the previous day based on the given current day.
Parameters:
- current_day (str): The name of the current day (e.g., 'mon').
Returns:
str: The name of the previous day.
"""
# Find the index of the current day
current_index = DAYS.index(current_day)
# Calculate the index of the previous day
previous_index = (current_index - 1) % len(DAYS)
# Get the name of the previous day
previous_day = DAYS[previous_index]
return previous_day
def get_next_day(self, current_day):
"""
Returns the name of the next day based on the given current day.
Parameters:
- current_day (str): The name of the current day (e.g., 'mon').
Returns:
str: The name of the next day.
"""
# Find the index of the current day
current_index = DAYS.index(current_day)
# Calculate the index of the next day
next_index = (current_index + 1) % len(DAYS)
# Get the name of the next day
next_day = DAYS[next_index]
return next_day
def get_start_day_hour(self, day, start_hour, tz_offset):
"""
Checks if the start day or hour should be adjusted based on the provided conditions.
Parameters:
- day (str): The name of the current day (e.g., 'mon').
- start_hour (int): The original start hour (0 to 23).
- tz_offset (int): The timezone offset in hours (-12 to +14).
Returns:
tuple: A tuple containing the adjusted day and start hour based on the provided conditions.
"""
logger.info(f"Checking whether start_day should change: {day}")
# Convert start_hour to UTC (e.g. start_hour=0, tz_offset=2, start_hour=22)
start_hour, days_passed = self.convert_to_utc(start_hour, tz_offset)
if days_passed == 1:
day = self.get_next_day(day)
elif days_passed == -1:
day = self.get_previous_day(day)
logger.info(f"new start_day: {day}")
logger.info(f"new start_hour: {start_hour}")
return day, start_hour
def get_stop_datetime(self, day, start_hour, start_min, period):
"""
Calculate the stop time for a program cycle.
Parameters:
- day (str): The day of the week.
- start_hour (int): The starting hour.
- start_min (int): The starting minute.
- period (int): The duration of the cycle in minutes.
Returns:
tuple: A tuple containing the stop day, stop hour, and stop minute.
"""
logger.debug(f"Converting to correct day, start, stop: {day}, {start_hour}, {start_min}, {period}")
stop_day_index = DAYS.index(day)
logger.debug(f"stop_day_index {stop_day_index}")
stop_min = (start_min + period) % 60
logger.debug(f"stop_min {stop_min}")
if stop_min < start_min:
# should go to the next hour
stop_hour = (start_hour + 1) % 24
# should go to the next day
if stop_hour < start_hour:
stop_day_index = (stop_day_index + 1) % 7
else:
stop_hour = start_hour
logger.debug(f"stop_hour {stop_hour}")
stop_day = DAYS[stop_day_index]
logger.debug(f"stop_day: {stop_day}")
return stop_day, stop_hour, stop_min
def store_program_cycles(self, json_data, store=False) -> None:
"""
Store program cycles and schedule them using the scheduler.
Parameters:
- json_data (dict): JSON data containing program information.
- store (bool, optional): Whether to store the program information. Default is False.
Returns:
None
"""
try:
triggers_to_start = []
triggers_to_stop = []
for day in json_data["days"].split(","):
if day not in DAYS: | raise DayValueException(f"{day} is not correct! Accepted values: {DAYS}") | 0 | 2023-12-22 08:06:09+00:00 | 12k |
bclavie/RAGatouille | ragatouille/RAGTrainer.py | [
{
"identifier": "LateInteractionModel",
"path": "ragatouille/models/base.py",
"snippet": "class LateInteractionModel(ABC):\n @abstractmethod\n def __init__(\n self,\n pretrained_model_name_or_path: Union[str, Path],\n n_gpu,\n ):\n ...\n\n @abstractmethod\n def train():\n ...\n\n @abstractmethod\n def index(self, name: str, collection: list[str]):\n ...\n\n @abstractmethod\n def add_to_index(self):\n ...\n\n @abstractmethod\n def search(self, name: str, query: Union[str, list[str]]):\n ...\n\n @abstractmethod\n def _search(self, name: str, query: str):\n ...\n\n @abstractmethod\n def _batch_search(self, name: str, queries: list[str]):\n ..."
},
{
"identifier": "ColBERT",
"path": "ragatouille/models/colbert.py",
"snippet": "class ColBERT(LateInteractionModel):\n def __init__(\n self,\n pretrained_model_name_or_path: Union[str, Path],\n n_gpu: int = -1,\n index_name: Optional[str] = None,\n verbose: int = 1,\n load_from_index: bool = False,\n **kwargs,\n ):\n self.verbose = verbose\n self.collection = None\n if n_gpu == -1:\n n_gpu = 1 if torch.cuda.device_count() == 0 else torch.cuda.device_count()\n\n if load_from_index:\n ckpt_config = ColBERTConfig.load_from_index(\n str(pretrained_model_name_or_path)\n )\n self.config = ckpt_config\n self.run_config = RunConfig(\n nranks=n_gpu, experiment=self.config.experiment, root=self.config.root\n )\n self.checkpoint = self.config.checkpoint\n self.index_name = self.config.index_name\n self.collection = self._get_collection_from_file(\n str(pretrained_model_name_or_path / \"collection.json\")\n )\n else:\n ckpt_config = ColBERTConfig.load_from_checkpoint(\n str(pretrained_model_name_or_path)\n )\n self.run_config = RunConfig(\n nranks=n_gpu, experiment=\"colbert\", root=\".ragatouille/\"\n )\n local_config = ColBERTConfig(**kwargs)\n self.config = ColBERTConfig.from_existing(\n ckpt_config,\n local_config,\n )\n self.checkpoint = pretrained_model_name_or_path\n self.index_name = index_name\n\n self.run_context = Run().context(self.run_config)\n self.run_context.__enter__() # Manually enter the context\n self.searcher = None\n\n def _update_index(self, new_documents: list[str], searcher: Searcher):\n updater = IndexUpdater(\n config=self.config, searcher=searcher, checkpoint=self.checkpoint\n )\n updater.add(new_documents)\n updater.persist_to_disk()\n\n def _get_collection_from_file(self, collection_path: str):\n return srsly.read_json(collection_path)\n\n def _write_collection_to_file(self, collection, collection_path: str):\n srsly.write_json(collection_path, collection)\n\n def add_to_index(\n self,\n new_documents: list[str],\n index_name: Optional[str] = None,\n ):\n self.index_name = index_name if index_name is not None else self.index_name\n if self.index_name is None:\n print(\n \"Cannot add to index without an index_name! 
Please provide one.\",\n \"Returning empty results.\",\n )\n return None\n\n print(\n \"WARNING: add_to_index support is currently experimental!\",\n \"add_to_index support will be more thorough in future versions\",\n )\n\n searcher = Searcher(\n checkpoint=self.checkpoint,\n config=None,\n collection=self.collection,\n index=self.index_name,\n verbose=self.verbose,\n )\n new_documents = list(set(new_documents))\n current_len = len(searcher.collection)\n new_doc_len = len(new_documents)\n\n if (\n current_len + new_doc_len < 5000\n or new_doc_len > current_len * 0.05\n or current_len + new_doc_len\n > 100 # Export bug handler -- TODO: Remove this requirement\n ):\n new_documents += [x for x in searcher.collection]\n self.index(\n new_documents,\n index_name=self.index_name,\n max_document_length=self.config.doc_maxlen,\n overwrite=\"force_silent_overwrite\",\n )\n else:\n self._update_index(new_documents, searcher)\n\n print(\n f\"Successfully updated index with {new_doc_len} new documents!\\n\",\n f\"New index size: {new_doc_len + current_len}\",\n )\n\n return str(\n Path(self.run_config.root)\n / Path(self.run_config.experiment)\n / \"indexes\"\n / self.index_name\n )\n\n def index(\n self,\n collection: list[str],\n index_name: Optional[\"str\"] = None,\n max_document_length: int = 256,\n overwrite: Union[bool, str] = \"reuse\",\n ):\n self.config.doc_maxlen = max_document_length\n if index_name is not None:\n if self.index_name is not None:\n print(\n f\"New index_name received!\",\n f\"Updating current index_name ({self.index_name}) to {index_name}\",\n )\n self.index_name = index_name\n else:\n if self.index_name is None:\n print(\n f\"No index_name received!\",\n f\"Using default index_name ({self.checkpoint}_new_index)\",\n )\n self.index_name = self.checkpoint + \"new_index\"\n\n collection = list(set(collection))\n self.collection = collection\n\n nbits = 2\n if len(collection) < 5000:\n nbits = 8\n elif len(collection) < 10000:\n nbits = 4\n self.config = ColBERTConfig.from_existing(\n self.config, ColBERTConfig(nbits=nbits)\n )\n self.indexer = Indexer(\n checkpoint=self.checkpoint,\n config=self.config,\n verbose=self.verbose,\n )\n self.indexer.index(\n name=self.index_name, collection=collection, overwrite=overwrite\n )\n\n index_path = str(\n Path(self.run_config.root)\n / Path(self.run_config.experiment)\n / \"indexes\"\n / self.index_name\n )\n self._write_collection_to_file(collection, index_path + \"/collection.json\")\n print(\"Done indexing!\")\n\n def _load_searcher(\n self,\n index_name: Optional[str],\n force_fast: bool = False,\n ):\n if index_name is not None:\n if self.index_name is not None:\n print(\n f\"New index_name received!\",\n f\"Updating current index_name ({self.index_name}) to {index_name}\",\n )\n self.index_name = index_name\n else:\n if self.index_name is None:\n print(\n \"Cannot search without an index_name! 
Please provide one.\",\n \"Returning empty results.\",\n )\n return None\n print(\n f\"Loading searcher for index {self.index_name} for the first time...\",\n \"This may take a few seconds\",\n )\n self.searcher = Searcher(\n checkpoint=self.checkpoint,\n config=None,\n collection=self.collection,\n index=self.index_name,\n )\n\n if not force_fast:\n if len(self.searcher.collection) < 10000:\n self.searcher.configure(ncells=4)\n self.searcher.configure(centroid_score_threshold=0.4)\n self.searcher.configure(ndocs=512)\n elif len(self.searcher.collection) < 100000:\n self.searcher.configure(ncells=2)\n self.searcher.configure(centroid_score_threshold=0.45)\n self.searcher.configure(ndocs=1024)\n # Otherwise, use defaults for k\n else:\n # Use fast settingss\n self.searcher.configure(ncells=1)\n self.searcher.configure(centroid_score_threshold=0.5)\n self.searcher.configure(ndocs=256)\n\n print(\"Searcher loaded!\")\n\n def search(\n self,\n query: Union[str, list[str]],\n index_name: Optional[\"str\"] = None,\n k: int = 10,\n force_fast: bool = False,\n zero_index_ranks: bool = False,\n ):\n if self.searcher is None or (\n index_name is not None and self.index_name != index_name\n ):\n self._load_searcher(index_name=index_name, force_fast=force_fast)\n\n if isinstance(query, str):\n results = [self._search(query, k)]\n else:\n results = self._batch_search(query, k)\n\n to_return = []\n\n for result in results:\n result_for_query = []\n for id_, rank, score in zip(*result):\n result_for_query.append(\n {\n \"content\": self.searcher.collection[id_],\n \"score\": score,\n \"rank\": rank - 1 if zero_index_ranks else rank,\n }\n )\n to_return.append(result_for_query)\n\n if len(to_return) == 1:\n return to_return[0]\n return to_return\n\n def _search(self, query: str, k: int):\n return self.searcher.search(query, k=k)\n\n def _batch_search(self, query: list[str], k: int):\n queries = {i: x for i, x in enumerate(query)}\n results = self.searcher.search_all(queries, k=k)\n results = [\n [list(zip(*value))[i] for i in range(3)]\n for value in results.todict().values()\n ]\n return results\n\n def train(self, data_dir, training_config: ColBERTConfig):\n training_config = ColBERTConfig.from_existing(self.config, training_config)\n training_config.nway = 2\n with Run().context(self.run_config):\n trainer = Trainer(\n triples=str(data_dir / \"triples.train.colbert.jsonl\"),\n queries=str(data_dir / \"queries.train.colbert.tsv\"),\n collection=str(data_dir / \"corpus.train.colbert.tsv\"),\n config=training_config,\n )\n\n trainer.train(checkpoint=self.checkpoint)\n\n def __del__(self):\n # Clean up context\n self.run_context.__exit__(None, None, None)"
},
{
"identifier": "HardNegativeMiner",
"path": "ragatouille/negative_miners/base.py",
"snippet": "class HardNegativeMiner(ABC):\n @abstractmethod\n def export_index(self, path: Union[str, Path]) -> bool:\n ...\n\n @abstractmethod\n def mine_hard_negatives(\n self,\n queries: list[str],\n collection: list[str],\n neg_k: int,\n ):\n ...\n\n @abstractmethod\n def _mine(\n self,\n queries: list[str],\n k: int,\n ):\n ..."
},
{
"identifier": "SimpleMiner",
"path": "ragatouille/negative_miners/simpleminer.py",
"snippet": "class SimpleMiner(HardNegativeMiner):\n \"\"\"The simplest approach to hard negatives mining.\n Select the most appropriate, small-sized embedding model for the target language.\n And retrieve random negatives in the top 10-100 results.\n Strong baseline for quick, low-engineering hard negative mining.\"\"\"\n\n def __init__(\n self,\n language_code: str,\n model_size: Literal[\"small\", \"base\", \"large\"] = \"small\",\n ) -> None:\n self.n_gpu = torch.cuda.device_count()\n self.target_language = language_code\n self.model_size = model_size\n if language_code not in [\"en\", \"zh\"]:\n language_code = \"other\"\n self.model_name = f\"{language_code}_{model_size}\"\n hub_model = DenseModels[self.model_name].value\n print(f\"Loading Hard Negative SimpleMiner dense embedding model {hub_model}...\")\n self.model = SentenceTransformer(hub_model)\n self.has_index = False\n self.min_rank = 10\n\n def build_index(\n self,\n collection,\n batch_size: int = 128,\n save_index: bool = False,\n save_path: Union[str, Path] = None,\n force_fp32: bool = True,\n ):\n print(f\"Building hard negative index for {len(collection)} documents...\")\n if len(collection) > 1000:\n pool = self.model.start_multi_process_pool()\n embeds = self.model.encode_multi_process(\n collection, pool, batch_size=batch_size\n )\n self.model.stop_multi_process_pool(pool)\n else:\n embeds = self.model.encode(collection, batch_size=batch_size)\n\n print(\"All documents embedded, now adding to index...\")\n\n self.max_rank = min(110, int(len(collection) // 10))\n self.max_rank = min(self.max_rank, len(collection))\n\n storage_type = StorageDataType.Float32\n if len(collection) > 500000 and not force_fp32:\n storage_type = StorageDataType.E4M3\n\n self.voyager_index = Index(\n Space.Cosine,\n num_dimensions=self.model.get_sentence_embedding_dimension(),\n storage_data_type=storage_type,\n )\n\n self.corpus_map = {i: doc for i, doc in enumerate(collection)}\n id_to_vector = {}\n for i, emb in enumerate(embeds):\n id_to_vector[i] = emb\n self.corpus_map[i] = collection[i]\n del embeds\n\n self.voyager_index.add_items(\n vectors=[x for x in id_to_vector.values()],\n ids=[x for x in id_to_vector.keys()],\n num_threads=-1,\n )\n\n del id_to_vector\n\n if save_index:\n print(f\"Saving index to {save_path}...\")\n self.export_index(save_path)\n else:\n print(\"save_index set to False, skipping saving hard negative index\")\n print(\"Hard negative index generated\")\n self.has_index = True\n\n def query_index(self, query, top_k=110):\n results = self.voyager_index.query(\n query, k=min(top_k, self.voyager_index.__len__())\n )\n return results\n\n def mine_hard_negatives(\n self,\n queries: Union[list[str], str],\n collection: Optional[list[str]] = None,\n save_index: bool = False,\n save_path: Union[str, Path] = None,\n force_fp32: bool = True,\n ):\n if self.has_index is False and collection is not None:\n self.build_index(\n collection,\n save_index=save_index,\n save_path=save_path,\n force_fp32=force_fp32,\n )\n if isinstance(queries, str):\n print(\"mining\")\n return self._mine(queries)\n return self._batch_mine(queries)\n\n def _mine(\n self,\n query: str,\n ):\n q_emb = self.model.encode(query)\n query_results = self.query_index(q_emb, top_k=self.max_rank)\n if len(query_results) > self.min_rank:\n query_results = query_results[self.min_rank : self.max_rank]\n query_results = [self.corpus_map[x] for x in query_results[0]]\n return query_results\n\n def _batch_mine(\n self,\n queries: list[str],\n ):\n \"\"\"Separate 
function to parallelise later on\"\"\"\n print(f\"Retrieving hard negatives for {len(queries)} queries...\")\n results = []\n print(\"Embedding queries...\")\n query_embeddings = self.model.encode(queries, show_progress_bar=True)\n print(\"Retrieving hard negatives...\")\n for q_emb in tqdm(query_embeddings):\n query_results = self.query_index(q_emb, top_k=self.max_rank)\n query_results = query_results[self.min_rank : self.max_rank]\n query_results = [self.corpus_map[x.id] for x in query_results]\n results.append(query_results)\n print(f\"\"\"Done generating hard negatives.\"\"\")\n return results\n\n def export_index(self, path: Union[str, Path]) -> bool:\n self.voyager_index.save(path)\n return True"
},
{
"identifier": "seeded_shuffle",
"path": "ragatouille/utils.py",
"snippet": "def seeded_shuffle(collection: list, seed: int = 42):\n random.seed(seed)\n random.shuffle(collection)\n return collection"
},
{
"identifier": "TrainingDataProcessor",
"path": "ragatouille/data/training_data_processor.py",
"snippet": "class TrainingDataProcessor:\n def __init__(\n self,\n collection: list[str],\n queries: list[str],\n negative_miner=None,\n ):\n self.collection = collection\n self.queries = queries\n self.negative_miner = negative_miner\n self._make_data_map()\n self.training_triplets = []\n\n def process_raw_data(\n self,\n raw_data,\n data_type: Literal[\"pairs\", \"triplets\", \"labeled_pairs\"],\n data_dir: Union[str, Path],\n export: bool = True,\n mine_hard_negatives: bool = True,\n num_new_negatives: int = 10,\n positive_label: int = 1,\n negative_label: int = 0,\n hard_negative_minimum_rank: int = 10,\n ):\n self.negative_miner.min_rank = hard_negative_minimum_rank\n if self.negative_miner is None and mine_hard_negatives:\n raise ValueError(\n \"mine_hard_negatives is True but no negative miner was provided!\"\n )\n if data_type == \"pairs\":\n self._process_raw_pairs(\n raw_data=raw_data,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=num_new_negatives,\n )\n elif data_type == \"labeled_pairs\":\n self._process_raw_labeled_pairs(\n raw_data=raw_data,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=num_new_negatives,\n positive_label=positive_label,\n negative_label=negative_label,\n )\n elif data_type == \"triplets\":\n self._process_raw_triplets(\n raw_data=raw_data,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=num_new_negatives,\n )\n\n if export:\n self.export_training_data(data_dir)\n\n def _make_individual_triplets(self, query, positives, negatives):\n \"\"\"Create the training data in ColBERT(v1) format from raw lists of triplets\"\"\"\n triplets = []\n q = self.query_map[query]\n random.seed(42)\n if len(positives) > 1:\n all_pos_texts = [p for p in positives]\n max_triplets_per_query = 20\n negs_per_positive = max(1, max_triplets_per_query // len(all_pos_texts))\n initial_triplets_count = 0\n for pos in all_pos_texts:\n p = self.passage_map[pos]\n chosen_negs = random.sample(\n negatives, min(len(negatives), negs_per_positive)\n )\n for neg in chosen_negs:\n n = self.passage_map[neg]\n initial_triplets_count += 1\n triplets.append([q, p, n])\n\n extra_triplets_needed = max_triplets_per_query - initial_triplets_count\n while extra_triplets_needed > 0:\n p = self.passage_map[random.choice(all_pos_texts)]\n n = self.passage_map[random.choice(negatives)]\n triplets.append([q, p, n])\n extra_triplets_needed -= 1\n else:\n p = self.passage_map[positives[0]]\n for n in negatives:\n triplets.append([q, p, self.passage_map[n]])\n\n return triplets\n\n def _get_new_negatives(self, query, passages, mine_hard_negatives, n_new_negatives):\n \"\"\"Generate new negatives for each query, using either:\n - The assigned hard negative miner if mine_hard_negatives is True\n - Randomly sampling from the full collection otherwise\n \"\"\"\n if mine_hard_negatives:\n hard_negatives = self.negative_miner.mine_hard_negatives(\n query, n_new_negatives\n )\n candidates = [\n x\n for x in hard_negatives\n if x not in passages[\"positives\"] and x not in passages[\"negatives\"]\n ]\n new_negatives = random.sample(\n candidates,\n min(n_new_negatives, len(candidates)),\n )\n else:\n new_negatives = [\n x\n for x in random.sample(self.collection, n_new_negatives)\n if x not in passages[\"positives\"] and x not in passages[\"negatives\"]\n ]\n\n return new_negatives\n\n def _process_raw_pairs(self, raw_data, mine_hard_negatives, n_new_negatives):\n \"\"\"Convert unlabeled pairs into training triplets.\n It's assumed unlabeled pairs are always in the format (query, 
relevant_passage)\"\"\"\n training_triplets = []\n raw_grouped_triplets = defaultdict(lambda: defaultdict(list))\n\n for query, positive in raw_data:\n if isinstance(positive, str):\n positive = [positive]\n raw_grouped_triplets[query][\"positives\"] += positive\n\n for query, passages in raw_grouped_triplets.items():\n if n_new_negatives > 0:\n passages[\"negatives\"] += self._get_new_negatives(\n query=query,\n passages=passages,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=n_new_negatives,\n )\n training_triplets += self._make_individual_triplets(\n query=query,\n positives=passages[\"positives\"],\n negatives=passages[\"negatives\"],\n )\n self.training_triplets = training_triplets\n\n def _process_raw_labeled_pairs(\n self,\n raw_data,\n mine_hard_negatives,\n n_new_negatives,\n positive_label,\n negative_label,\n ):\n \"\"\"\n Convert labeled pairs intro training triplets.\n Labeled pairs are in the format (query, passage, label)\n \"\"\"\n training_triplets = []\n raw_grouped_triplets = defaultdict(lambda: defaultdict(list))\n\n for query, passage, label in raw_data:\n if isinstance(passage, str):\n passage = [passage]\n if label == positive_label:\n label = \"positives\"\n elif label == negative_label:\n label = \"negatives\"\n else:\n raise ValueError(\n f\"Label {label} must correspond to either positive_label or negative_label!\"\n )\n\n raw_grouped_triplets[query][label] += passage\n\n for query, passages in raw_grouped_triplets.items():\n if n_new_negatives > 0:\n passages[\"negatives\"] += self._get_new_negatives(\n query=query,\n passages=passages,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=n_new_negatives,\n )\n\n training_triplets += self._make_individual_triplets(\n query=query,\n positives=passages[\"positives\"],\n negatives=passages[\"negatives\"],\n )\n self.training_triplets = training_triplets\n\n def _process_raw_triplets(self, raw_data, mine_hard_negatives, n_new_negatives):\n \"\"\"\n Convert raw triplets\n (query, positives : str | list[str], negatives: str | list[str])\n into training triplets.\n \"\"\"\n training_triplets = []\n raw_grouped_triplets = defaultdict(lambda: defaultdict(list))\n for query, positive, negative in raw_data:\n if isinstance(positive, str):\n positive = [positive]\n if isinstance(negative, str):\n negative = [negative]\n\n raw_grouped_triplets[query][\"positives\"] += positive\n raw_grouped_triplets[query][\"negatives\"] += negative\n\n for query, passages in raw_grouped_triplets.items():\n if n_new_negatives > 0:\n passages[\"negatives\"] += self._get_new_negatives(\n query=query,\n passages=passages,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=n_new_negatives,\n )\n training_triplets += self._make_individual_triplets(\n query=query,\n positives=passages[\"positives\"],\n negatives=passages[\"negatives\"],\n )\n self.training_triplets = training_triplets\n\n def _make_data_map(self):\n \"\"\"\n Generate a query_text: query_id and passage_text: passage_id mapping\n To easily generate ColBERT-format training data.\n \"\"\"\n self.query_map = {}\n self.passage_map = {}\n\n for i, query in enumerate(self.queries):\n self.query_map[query] = i\n for i, passage in enumerate(list(self.collection)):\n self.passage_map[passage] = i\n\n def export_training_data(self, path: Union[str, Path]):\n \"\"\"\n Export training data for both training and versioning purposes.\n {path} should ideally be dvc versioned.\n \"\"\"\n\n path = Path(path)\n\n # Create the directory if it does not exist\n 
os.makedirs(path, exist_ok=True)\n\n with open(path / \"queries.train.colbert.tsv\", \"w\") as f:\n for query, idx in self.query_map.items():\n query = query.replace(\"\\t\", \" \").replace(\"\\n\", \" \")\n f.write(f\"{idx}\\t{query}\\n\")\n with open(path / \"corpus.train.colbert.tsv\", \"w\") as f:\n for document, idx in self.passage_map.items():\n document = document.replace(\"\\t\", \" \").replace(\"\\n\", \" \")\n f.write(f\"{idx}\\t{document}\\n\")\n\n srsly.write_jsonl(path / \"triples.train.colbert.jsonl\", self.training_triplets)"
}
] | from pathlib import Path
from typing import Union, Literal, Optional
from colbert.infra import ColBERTConfig
from ragatouille.models import LateInteractionModel, ColBERT
from ragatouille.negative_miners import HardNegativeMiner, SimpleMiner
from ragatouille.utils import seeded_shuffle
from ragatouille.data import TrainingDataProcessor | 7,472 |
class RAGTrainer:
"""Main trainer to fine-tune/train ColBERT models with a few lines."""
model: Union[LateInteractionModel, None] = None
negative_miner: Union[HardNegativeMiner, None] = None
collection: list[str] = []
queries: Union[list[str], None] = None
raw_data: Union[list[tuple], list[list], None] = None
training_triplets: list[list[int]] = list()
def __init__(
self,
model_name: str,
pretrained_model_name: str,
language_code: str = "en",
n_usable_gpus: int = -1,
):
"""
Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from.
Parameters:
model_name: str - Name of the model to train. This will be used to name the checkpoints and the index.
pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name.
language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index.
n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available.
Returns:
self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised.
"""
self.model_name = model_name
self.pretrained_model_name = pretrained_model_name
self.language_code = language_code
self.model = ColBERT(
pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus
)
def add_documents(self, documents: list[str]):
self.collection += documents
seeded_shuffle(self.collection)
def export_training_data(self, path: Union[str, Path]):
"""
Manually export the training data processed by prepare_training_data to a given path.
Parameters:
path: Union[str, Path] - Path to the directory where the data will be exported."""
self.data_processor.export_training_data(path)
def prepare_training_data(
self,
raw_data: Union[list[tuple], list[list]],
all_documents: Optional[list[str]] = None,
data_out_path: Union[str, Path] = "./data/",
num_new_negatives: int = 10,
hard_negative_minimum_rank: int = 10,
mine_hard_negatives: bool = True,
hard_negative_model_size: str = "small",
pairs_with_labels: bool = False,
positive_label: Union[int, str] = 1,
negative_label: Union[int, str] = 0,
) -> str:
"""
        Fully pre-process input data in various raw formats into ColBERT-ready files and triplets.
        Will accept a variety of formats, such as unannotated pairs, annotated pairs, triplets of strings, and triplets of lists of strings.
        Will process the data into a ColBERT-ready format and export it to data_out_path.
        Will generate hard negatives if mine_hard_negatives is True.
        num_new_negatives decides how many negatives will be generated. If mine_hard_negatives is False and num_new_negatives is greater than 0, these negatives will be randomly sampled.
Parameters:
raw_data: Union[list[tuple], list[list]] - List of pairs, annotated pairs, or triplets of strings.
all_documents: Optional[list[str]] - A corpus of documents to be used for sampling negatives.
data_out_path: Union[str, Path] - Path to the directory where the data will be exported (can be a tmp directory).
num_new_negatives: int - Number of new negatives to generate for each query.
mine_hard_negatives: bool - Whether to use hard negatives mining or not.
hard_negative_model_size: str - Size of the model to use for hard negatives mining.
pairs_with_labels: bool - Whether the raw_data is a list of pairs with labels or not.
positive_label: Union[int, str] - Label to use for positive pairs.
negative_label: Union[int, str] - Label to use for negative pairs.
Returns:
data_out_path: Union[str, Path] - Path to the directory where the data has been exported.
"""
if all_documents is not None:
self.collection += all_documents
self.data_dir = Path(data_out_path)
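        # Infer the training data format from the arity of the first example: 2 -> (labeled) pairs, 3 -> triplets.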
if len(raw_data[0]) == 2:
data_type = "pairs"
if pairs_with_labels:
data_type = "labeled_pairs"
elif len(raw_data[0]) == 3:
data_type = "triplets"
else:
raise ValueError("Raw data must be a list of pairs or triplets of strings.")
self.collection += [x[1] for x in raw_data]
if data_type == "triplets":
self.collection += [x[2] for x in raw_data]
self.queries = set([x[0] for x in raw_data])
self.collection = list(set(self.collection))
seeded_shuffle(self.collection)
if mine_hard_negatives:
self.negative_miner = SimpleMiner(
language_code=self.language_code,
model_size=hard_negative_model_size,
)
self.negative_miner.build_index(self.collection)
|
class RAGTrainer:
"""Main trainer to fine-tune/train ColBERT models with a few lines."""
model: Union[LateInteractionModel, None] = None
negative_miner: Union[HardNegativeMiner, None] = None
collection: list[str] = []
queries: Union[list[str], None] = None
raw_data: Union[list[tuple], list[list], None] = None
training_triplets: list[list[int]] = list()
def __init__(
self,
model_name: str,
pretrained_model_name: str,
language_code: str = "en",
n_usable_gpus: int = -1,
):
"""
Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from.
Parameters:
model_name: str - Name of the model to train. This will be used to name the checkpoints and the index.
pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name.
language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index.
n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available.
Returns:
self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised.
"""
self.model_name = model_name
self.pretrained_model_name = pretrained_model_name
self.language_code = language_code
self.model = ColBERT(
pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus
)
def add_documents(self, documents: list[str]):
self.collection += documents
seeded_shuffle(self.collection)
def export_training_data(self, path: Union[str, Path]):
"""
Manually export the training data processed by prepare_training_data to a given path.
Parameters:
path: Union[str, Path] - Path to the directory where the data will be exported."""
self.data_processor.export_training_data(path)
def prepare_training_data(
self,
raw_data: Union[list[tuple], list[list]],
all_documents: Optional[list[str]] = None,
data_out_path: Union[str, Path] = "./data/",
num_new_negatives: int = 10,
hard_negative_minimum_rank: int = 10,
mine_hard_negatives: bool = True,
hard_negative_model_size: str = "small",
pairs_with_labels: bool = False,
positive_label: Union[int, str] = 1,
negative_label: Union[int, str] = 0,
) -> str:
"""
        Fully pre-process input data in various raw formats into ColBERT-ready files and triplets.
        Will accept a variety of formats, such as unannotated pairs, annotated pairs, triplets of strings, and triplets of lists of strings.
        Will process the data into a ColBERT-ready format and export it to data_out_path.
        Will generate hard negatives if mine_hard_negatives is True.
        num_new_negatives decides how many negatives will be generated. If mine_hard_negatives is False and num_new_negatives is greater than 0, these negatives will be randomly sampled.
Parameters:
raw_data: Union[list[tuple], list[list]] - List of pairs, annotated pairs, or triplets of strings.
all_documents: Optional[list[str]] - A corpus of documents to be used for sampling negatives.
data_out_path: Union[str, Path] - Path to the directory where the data will be exported (can be a tmp directory).
num_new_negatives: int - Number of new negatives to generate for each query.
mine_hard_negatives: bool - Whether to use hard negatives mining or not.
hard_negative_model_size: str - Size of the model to use for hard negatives mining.
pairs_with_labels: bool - Whether the raw_data is a list of pairs with labels or not.
positive_label: Union[int, str] - Label to use for positive pairs.
negative_label: Union[int, str] - Label to use for negative pairs.
Returns:
data_out_path: Union[str, Path] - Path to the directory where the data has been exported.
"""
if all_documents is not None:
self.collection += all_documents
self.data_dir = Path(data_out_path)
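        # Infer the training data format from the arity of the first example: 2 -> (labeled) pairs, 3 -> triplets.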
if len(raw_data[0]) == 2:
data_type = "pairs"
if pairs_with_labels:
data_type = "labeled_pairs"
elif len(raw_data[0]) == 3:
data_type = "triplets"
else:
raise ValueError("Raw data must be a list of pairs or triplets of strings.")
self.collection += [x[1] for x in raw_data]
if data_type == "triplets":
self.collection += [x[2] for x in raw_data]
self.queries = set([x[0] for x in raw_data])
self.collection = list(set(self.collection))
seeded_shuffle(self.collection)
if mine_hard_negatives:
self.negative_miner = SimpleMiner(
language_code=self.language_code,
model_size=hard_negative_model_size,
)
self.negative_miner.build_index(self.collection)
| self.data_processor = TrainingDataProcessor( | 5 | 2023-12-29 16:26:42+00:00 | 12k |
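A minimal sketch of how the RAGTrainer flow in the record above is typically driven; the import path, model names, paths, and example strings here are illustrative placeholders rather than values taken from the record:

from ragatouille import RAGTrainer  # assumes the package re-exports RAGTrainer at the top level

# Unannotated (query, relevant_passage) pairs; labeled pairs and triplets are accepted as well.
pairs = [
    ("What is late interaction?", "Late interaction scores queries and documents token by token."),
    ("What are hard negatives?", "Hard negatives are near-miss passages used to sharpen training."),
]

trainer = RAGTrainer(
    model_name="my-colbert",                         # placeholder checkpoint/index name
    pretrained_model_name="colbert-ir/colbertv2.0",  # placeholder base model
    language_code="en",
)
trainer.prepare_training_data(
    raw_data=pairs,
    data_out_path="./data/",       # queries/corpus TSVs and a triples JSONL are written here
    mine_hard_negatives=True,      # builds a SimpleMiner dense index over the collection
    num_new_negatives=10,
)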
shibing624/chatgpt-webui | main.py | [
{
"identifier": "http_proxy",
"path": "src/config.py",
"snippet": "def retrieve_openai_api(api_key=None):\ndef retrieve_proxy(proxy=None):\ndef update_doc_config(two_column_pdf):"
},
{
"identifier": "get_model",
"path": "src/models.py",
"snippet": "def get_model(\n model_name,\n lora_model_path=None,\n access_key=None,\n temperature=None,\n top_p=None,\n system_prompt=None,\n user_name=\"\",\n original_model=None,\n):\n msg = i18n(\"模型设置为了:\") + f\" {model_name}\"\n model_type = ModelType.get_type(model_name)\n lora_choices = [\"No LoRA\"]\n if model_type != ModelType.OpenAI:\n config.local_embedding = True\n model = original_model\n chatbot = gr.Chatbot.update(label=model_name)\n try:\n if model_type == ModelType.OpenAI:\n logger.info(f\"正在加载OpenAI模型: {model_name}\")\n model = OpenAIClient(\n model_name=model_name,\n api_key=access_key,\n system_prompt=system_prompt,\n user_name=user_name,\n )\n logger.info(f\"OpenAI模型加载完成: {model_name}\")\n elif model_type == ModelType.OpenAIVision:\n logger.info(f\"正在加载OpenAI Vision模型: {model_name}\")\n access_key = os.environ.get(\"OPENAI_API_KEY\", access_key)\n model = OpenAIVisionClient(\n model_name, api_key=access_key, user_name=user_name)\n elif model_type == ModelType.ChatGLM:\n logger.info(f\"正在加载ChatGLM模型: {model_name}\")\n model = ChatGLMClient(model_name, user_name=user_name)\n elif model_type == ModelType.LLaMA:\n logger.info(f\"正在加载LLaMA模型: {model_name}\")\n model = LLaMAClient(model_name, user_name=user_name)\n elif model_type == ModelType.Unknown:\n raise ValueError(f\"未知模型: {model_name}\")\n except Exception as e:\n logger.error(e)\n logger.info(msg)\n presudo_key = hide_middle_chars(access_key)\n if original_model is not None and model is not None:\n model.history = original_model.history\n model.history_file_path = original_model.history_file_path\n return model, msg, chatbot, gr.Dropdown.update(choices=lora_choices, visible=False), access_key, presudo_key"
},
{
"identifier": "postprocess",
"path": "src/overwrites.py",
"snippet": "def postprocess(\n self,\n y,\n):\n \"\"\"\n Parameters:\n y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.\n Returns:\n List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed.\n \"\"\"\n if y is None:\n return []\n processed_messages = []\n for message_pair in y:\n assert isinstance(\n message_pair, (tuple, list)\n ), f\"Expected a list of lists or list of tuples. Received: {message_pair}\"\n assert (\n len(message_pair) == 2\n ), f\"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}\"\n\n processed_messages.append(\n [\n self._postprocess_chat_messages(message_pair[0], \"user\"),\n self._postprocess_chat_messages(message_pair[1], \"bot\"),\n ]\n )\n return processed_messages"
},
{
"identifier": "postprocess_chat_messages",
"path": "src/overwrites.py",
"snippet": "def postprocess_chat_messages(\n self, chat_message, role: str\n):\n if chat_message is None:\n return None\n elif isinstance(chat_message, (tuple, list)):\n file_uri = chat_message[0]\n if validate_url(file_uri):\n filepath = file_uri\n else:\n filepath = self.make_temp_copy_if_needed(file_uri)\n\n mime_type = client_utils.get_mimetype(filepath)\n return {\n \"name\": filepath,\n \"mime_type\": mime_type,\n \"alt_text\": chat_message[1] if len(chat_message) > 1 else None,\n \"data\": None, # These last two fields are filled in by the frontend\n \"is_file\": True,\n }\n elif isinstance(chat_message, str):\n # chat_message = inspect.cleandoc(chat_message)\n # escape html spaces\n # chat_message = chat_message.replace(\" \", \" \")\n if role == \"bot\":\n chat_message = convert_bot_before_marked(chat_message)\n elif role == \"user\":\n chat_message = convert_user_before_marked(chat_message)\n return chat_message\n else:\n raise ValueError(f\"Invalid message for Chatbot component: {chat_message}\")"
},
{
"identifier": "reload_javascript",
"path": "src/overwrites.py",
"snippet": "def reload_javascript():\n js = javascript_html()\n js += '<script async type=\"module\" src=\"https://cdn.jsdelivr.net/npm/marked/marked.min.js\"></script>'\n js += '<script async type=\"module\" src=\"https://spin.js.org/spin.umd.js\"></script><link type=\"text/css\" href=\"https://spin.js.org/spin.css\" rel=\"stylesheet\" />'\n js += '<script async src=\"https://cdn.jsdelivr.net/npm/@fancyapps/[email protected]/dist/fancybox/fancybox.umd.js\"></script><link rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/@fancyapps/[email protected]/dist/fancybox/fancybox.css\" />'\n\n meta = \"\"\"\n <meta name=\"apple-mobile-web-app-title\" content=\"ChatGPT-WebUI\">\n <meta name=\"apple-mobile-web-app-capable\" content=\"yes\">\n <meta name=\"application-name\" content=\"ChatGPT-WebUI\">\n <meta name='viewport' content='width=device-width, initial-scale=1.0, user-scalable=no, viewport-fit=cover'>\n <meta name=\"theme-color\" content=\"#ffffff\">\n \"\"\"\n css = css_html()\n\n def template_response(*args, **kwargs):\n res = GradioTemplateResponseOriginal(*args, **kwargs)\n res.body = res.body.replace(b'</head>', f'{meta}{js}</head>'.encode(\"utf8\"))\n res.body = res.body.replace(b'</body>', f'{css}</body>'.encode(\"utf8\"))\n res.init_headers()\n return res\n\n gr.routes.templates.TemplateResponse = template_response"
},
{
"identifier": "get_html",
"path": "src/overwrites.py",
"snippet": "def get_html(filename):\n path = os.path.join(chuanhu_path, \"assets\", \"html\", filename)\n if os.path.exists(path):\n with open(path, encoding=\"utf8\") as file:\n return file.read()\n return \"\""
},
{
"identifier": "MODELS",
"path": "src/presets.py",
"snippet": "class I18nAuto:\n def __init__(self):\n def __call__(self, key):\nCHATGLM_MODEL = None\nCHATGLM_TOKENIZER = None\nLLAMA_MODEL = None\nLLAMA_INFERENCER = None\nINITIAL_SYSTEM_PROMPT = \"You are a helpful assistant.\"\nAPI_HOST = \"api.openai.com\"\nOPENAI_API_BASE = \"https://api.openai.com/v1\"\nCHAT_COMPLETION_URL = \"https://api.openai.com/v1/chat/completions\"\nIMAGES_COMPLETION_URL = \"https://api.openai.com/v1/images/generations\"\nCOMPLETION_URL = \"https://api.openai.com/v1/completions\"\nBALANCE_API_URL = \"https://api.openai.com/dashboard/billing/credit_grants\"\nUSAGE_API_URL = \"https://api.openai.com/dashboard/billing/usage\"\nHISTORY_DIR = os.path.join(pwd_path, '../history')\nTEMPLATES_DIR = os.path.join(pwd_path, '../templates')\nSTANDARD_ERROR_MSG = i18n(\"☹️发生了错误:\") # 错误信息的标准前缀\nGENERAL_ERROR_MSG = i18n(\"获取对话时发生错误,请查看后台日志\")\nERROR_RETRIEVE_MSG = i18n(\"请检查网络连接,或者API-Key是否有效。\")\nCONNECTION_TIMEOUT_MSG = i18n(\"连接超时,无法获取对话。\") # 连接超时\nREAD_TIMEOUT_MSG = i18n(\"读取超时,无法获取对话。\") # 读取超时\nPROXY_ERROR_MSG = i18n(\"代理错误,无法获取对话。\") # 代理错误\nSSL_ERROR_PROMPT = i18n(\"SSL错误,无法获取对话。\") # SSL 错误\nNO_APIKEY_MSG = i18n(\"API key为空,请检查是否输入正确。\") # API key 长度不足 51 位\nNO_INPUT_MSG = i18n(\"请输入对话内容。\") # 未输入对话内容\nBILLING_NOT_APPLICABLE_MSG = i18n(\"账单信息不适用\") # 本地运行的模型返回的账单信息\nTIMEOUT_STREAMING = 60 # 流式对话时的超时时间\nTIMEOUT_ALL = 200 # 非流式对话时的超时时间\nENABLE_STREAMING_OPTION = True # 是否启用选择选择是否实时显示回答的勾选框\nHIDE_MY_KEY = True # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True\nCONCURRENT_COUNT = 100 # 允许同时使用的用户数量\nSIM_K = 5\nINDEX_QUERY_TEMPRATURE = 1.0\nCHUANHU_TITLE = i18n(\"ChatGPT 🚀\")\nCHUANHU_DESCRIPTION = i18n(\"GitHub: [shibing624/chatgpt-webui](https://github.com/shibing624/chatgpt-webui)\")\nONLINE_MODELS = [\n \"gpt-3.5-turbo\",\n \"gpt-3.5-turbo-16k\",\n \"gpt-3.5-turbo-0301\",\n \"gpt-3.5-turbo-0613\",\n \"gpt-3.5-turbo-1106\",\n \"gpt-4\",\n \"gpt-4-32k\",\n \"gpt-4-1106-preview\",\n \"gpt-4-vision-preview\",\n]\nMODEL_TOKEN_LIMIT = {\n \"gpt-3.5-turbo\": 4096,\n \"gpt-3.5-turbo-16k\": 16384,\n \"gpt-3.5-turbo-0301\": 4096,\n \"gpt-3.5-turbo-0613\": 4096,\n \"gpt-3.5-turbo-1106\": 16384,\n \"gpt-4\": 8192,\n \"gpt-4-32k\": 32768,\n \"gpt-4-1106-preview\": 128000,\n \"gpt-4-vision-preview\": 128000,\n}\nLOCAL_MODELS = {\n \"chatglm3-6b\": \"THUDM/chatglm3-6b\",\n \"llama-2-7b-chat\": \"TheBloke/Llama-2-7B-Chat-GPTQ\",\n \"yi-6b-chat-8bits\": \"01-ai/Yi-6B-Chat-8bits\",\n \"yi-6b-chat\": \"01-ai/Yi-6B-Chat\",\n}\nMODELS = ONLINE_MODELS + list(LOCAL_MODELS.keys())\nDEFAULT_MODEL = 0\nTOKEN_OFFSET = 1000 # 模型的token上限减去这个值,得到软上限。到达软上限之后,自动尝试减少token占用。\nDEFAULT_TOKEN_LIMIT = 3000 # 默认的token上限\nREDUCE_TOKEN_FACTOR = 0.5 # 与模型token上限想乘,得到目标token数。减少token占用时,将token占用减少到目标token数以下。\nREPLY_LANGUAGES = [\n \"简体中文\",\n \"繁體中文\",\n \"English\",\n \"日本語\",\n \"Español\",\n \"Français\",\n \"Deutsch\",\n \"跟随问题语言(不稳定)\"\n]\nHISTORY_NAME_METHODS = [\n i18n(\"根据日期时间\"),\n i18n(\"第一条提问\"),\n i18n(\"模型自动总结(消耗tokens)\"),\n]\nWEBSEARCH_PTOMPT_TEMPLATE = \"\"\"\\\nWeb search results:\n\n{web_results}\nCurrent date: {current_date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. 
If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.\nQuery: {query}\nReply in {reply_language}\n\"\"\"\nPROMPT_TEMPLATE = \"\"\"\\\nContext information is below.\n---------------------\n{context_str}\n---------------------\nCurrent date: {current_date}.\nUsing the provided context information, write a comprehensive reply to the given query.\nMake sure to cite results using [number] notation after the reference.\nIf the provided context information refer to multiple subjects with the same name, write separate answers for each subject.\nUse prior knowledge only if the given context didn't provide enough information.\nAnswer the question: {query_str}\nReply in {reply_language}\n\"\"\"\nREFINE_TEMPLATE = \"\"\"\\\nThe original question is as follows: {query_str}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer\n(only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better\nReply in {reply_language}\nIf the context isn't useful, return the original answer.\n\"\"\"\nSUMMARIZE_PROMPT = \"\"\"Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN 中文:\"\"\"\nSUMMARY_CHAT_SYSTEM_PROMPT = \"\"\"\\\nPlease summarize the following conversation for a chat topic.\nNo more than 16 characters.\nNo special characters.\nPunctuation mark is banned.\nNot including '.' ':' '?' '!' '“' '*' '<' '>'.\nReply in user's language.\n\"\"\"\nALREADY_CONVERTED_MARK = \"<!-- ALREADY CONVERTED BY PARSER. -->\"\nSTART_OF_OUTPUT_MARK = \"<!-- SOO IN MESSAGE -->\"\nEND_OF_OUTPUT_MARK = \"<!-- EOO IN MESSAGE -->\""
},
{
"identifier": "delete_chat_history",
"path": "src/utils.py",
"snippet": " class DataframeData(TypedDict):\nclass ConfigType(Enum):\nclass ConfigItem:\nclass SetupWizard:\ndef predict(current_model, *args):\ndef billing_info(current_model):\ndef set_key(current_model, *args):\ndef load_chat_history(current_model, *args):\ndef delete_chat_history(current_model, *args):\ndef interrupt(current_model, *args):\ndef reset(current_model, *args):\ndef retry(current_model, *args):\ndef delete_first_conversation(current_model, *args):\ndef delete_last_conversation(current_model, *args):\ndef set_system_prompt(current_model, *args):\ndef rename_chat_history(current_model, *args):\ndef auto_name_chat_history(current_model, *args):\ndef export_markdown(current_model, *args):\ndef upload_chat_history(current_model, *args):\ndef set_token_upper_limit(current_model, *args):\ndef set_temperature(current_model, *args):\ndef set_top_p(current_model, *args):\ndef set_n_choices(current_model, *args):\ndef set_stop_sequence(current_model, *args):\ndef set_max_tokens(current_model, *args):\ndef set_presence_penalty(current_model, *args):\ndef set_frequency_penalty(current_model, *args):\ndef set_logit_bias(current_model, *args):\ndef set_user_identifier(current_model, *args):\ndef set_single_turn(current_model, *args):\ndef handle_file_upload(current_model, *args):\ndef handle_summarize_index(current_model, *args):\ndef like(current_model, *args):\ndef dislike(current_model, *args):\ndef count_token(input_str):\ndef markdown_to_html_with_syntax_highlight(md_str): # deprecated\n def replacer(match):\ndef normalize_markdown(md_text: str) -> str: # deprecated\ndef convert_mdtext(md_text): # deprecated\ndef clip_rawtext(chat_message, need_escape=True):\ndef convert_bot_before_marked(chat_message):\ndef convert_user_before_marked(chat_message):\ndef escape_markdown(text):\ndef convert_asis(userinput): # deprecated\ndef detect_converted_mark(userinput): # deprecated\ndef detect_language(code): # deprecated\ndef construct_text(role, text):\ndef construct_user(text):\ndef construct_system(text):\ndef construct_assistant(text):\ndef save_file(filename, model, chatbot):\ndef sorted_by_pinyin(list):\ndef sorted_by_last_modified_time(list, dir):\ndef get_file_names_by_type(dir, filetypes=[\".json\"]):\ndef get_file_names_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_dropdown_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_by_last_modified_time(dir, filetypes=[\".json\"]):\ndef get_history_names(user_name=\"\"):\ndef get_first_history_name(user_name=\"\"):\ndef get_history_list(user_name=\"\"):\ndef init_history_list(user_name=\"\"):\ndef filter_history(user_name, keyword):\ndef load_template(filename, mode=0):\ndef get_template_names():\ndef get_template_dropdown():\ndef get_template_content(templates, selection, original_system_prompt):\ndef reset_textbox():\ndef reset_default():\ndef change_api_host(host):\ndef change_proxy(proxy):\ndef hide_middle_chars(s):\ndef submit_key(key):\ndef replace_today(prompt):\ndef get_geoip():\n def fetch_ip():\ndef find_n(lst, max_num):\ndef start_outputing():\ndef end_outputing():\ndef cancel_outputing():\ndef transfer_input(inputs):\ndef update_chuanhu():\ndef add_source_numbers(lst, source_name=\"Source\", use_source=True):\ndef add_details(lst):\ndef sheet_to_string(sheet, sheet_name=None):\ndef excel_to_string(file_path):\ndef get_last_day_of_month(any_day):\ndef get_model_source(model_name, alternative_source):\ndef refresh_ui_elements_on_load(current_model, selected_model_name, user_name):\ndef 
toggle_like_btn_visibility(selected_model_name):\ndef get_corresponding_file_type_by_model_name(selected_model_name):\ndef new_auto_history_filename(username):\ndef get_history_filepath(username):\ndef beautify_err_msg(err_msg):\ndef auth_from_conf(username, password):\ndef get_files_hash(file_src=None, file_paths=None):\ndef myprint(**args):\ndef replace_special_symbols(string, replace_string=\" \"):\n def __init__(self, key, name, default=None, type=ConfigType.String) -> None:\ndef generate_prompt_string(config_item):\ndef generate_result_string(config_item, config_value):\n def __init__(self, file_path=config_file) -> None:\n def set(self, config_items: List[ConfigItem], prompt: str):\n def set_users(self):\n def __setitem__(self, setting_key: str, value):\n def __getitem__(self, setting_key: str):\n def save(self):\ndef setup_wizard():\ndef save_pkl(data, file_path):\ndef load_pkl(file_path):\ndef chinese_preprocessing_func(text: str) -> List[str]:\nSERVER_GEO_IP_MSG = None\nFETCHING_IP = False\n SERVER_GEO_IP_MSG = i18n(\"你可以使用聊天功能。\")\n SERVER_GEO_IP_MSG = \"**您的IP区域:中国。**\"\n SERVER_GEO_IP_MSG = i18n(\"您的IP区域:\") + f\"{country}。\"\n FETCHING_IP = False\n FETCHING_IP = True"
}
] | import gradio as gr
from loguru import logger
from src.config import (
http_proxy,
hide_history_when_not_logged_in,
chat_name_method_index,
my_api_key, multi_api_key, server_name,
server_port, share, config_file, api_host,
authflag,
dockerflag,
show_api_billing,
latex_delimiters_set,
user_avatar, bot_avatar,
update_doc_config,
)
from src.models import get_model
from src.overwrites import (
postprocess, postprocess_chat_messages,
reload_javascript, get_html,
)
from src.presets import (
MODELS,
HISTORY_NAME_METHODS,
small_and_beautiful_theme,
CONCURRENT_COUNT,
CHUANHU_TITLE,
HIDE_MY_KEY,
DEFAULT_MODEL,
REPLY_LANGUAGES,
INITIAL_SYSTEM_PROMPT,
ENABLE_STREAMING_OPTION,
CHUANHU_DESCRIPTION,
favicon_path,
API_HOST,
HISTORY_DIR,
assets_path,
)
from src.utils import (
delete_chat_history,
filter_history,
get_history_list,
auto_name_chat_history,
get_template_dropdown,
rename_chat_history,
init_history_list,
get_first_history_name,
setup_wizard,
auth_from_conf,
get_geoip,
get_template_names,
load_template,
get_history_names,
reset,
predict,
interrupt,
retry,
i18n,
dislike,
toggle_like_btn_visibility,
set_key,
set_single_turn,
hide_middle_chars,
set_system_prompt,
start_outputing,
set_token_upper_limit,
set_temperature,
set_user_identifier,
set_top_p,
delete_first_conversation,
delete_last_conversation,
set_n_choices,
set_logit_bias,
load_chat_history,
end_outputing,
set_max_tokens,
reset_default,
reset_textbox,
set_stop_sequence,
set_presence_penalty, set_frequency_penalty,
upload_chat_history,
export_markdown,
billing_info,
get_template_content,
like,
transfer_input,
handle_file_upload,
handle_summarize_index,
) | 7,923 | single_turn_checkbox.change(
set_single_turn, [current_model, single_turn_checkbox], None, show_progress=False)
model_select_dropdown.change(get_model,
[model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider,
top_p_slider, systemPromptTxt, user_name, current_model], [
current_model, status_display, chatbot, lora_select_dropdown, user_api_key,
keyTxt], show_progress=True, api_name="get_model")
model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [
like_dislike_area], show_progress=False)
lora_select_dropdown.change(get_model,
[model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider,
top_p_slider, systemPromptTxt, user_name, current_model],
[current_model, status_display, chatbot], show_progress=True)
# Template
systemPromptTxt.input(set_system_prompt, [
current_model, systemPromptTxt], None)
templateRefreshBtn.click(get_template_dropdown, None, [
templateFileSelectDropdown])
templateFileSelectDropdown.input(
load_template,
[templateFileSelectDropdown],
[promptTemplates, templateSelectDropdown],
show_progress=True,
)
templateSelectDropdown.change(
get_template_content,
[promptTemplates, templateSelectDropdown, systemPromptTxt],
[systemPromptTxt],
show_progress=True,
)
# S&L
renameHistoryBtn.click(
rename_chat_history,
[current_model, saveFileName, chatbot],
[historySelectList],
show_progress=True,
_js='(a,b,c,d)=>{return saveChatHistory(a,b,c,d);}'
)
exportMarkdownBtn.click(
export_markdown,
[current_model, saveFileName, chatbot],
[],
show_progress=True,
)
historyRefreshBtn.click(**refresh_history_args)
historyDeleteBtn.click(delete_chat_history, [current_model, historySelectList],
[status_display, historySelectList, chatbot],
_js='(a,b,c)=>{return showConfirmationDialog(a, b, c);}').then(
reset,
inputs=[current_model, retain_system_prompt_checkbox],
outputs=[chatbot, status_display, historySelectList, systemPromptTxt],
show_progress=True,
_js='(a,b)=>{return clearChatbot(a,b);}',
)
historySelectList.input(**load_history_from_file_args)
uploadFileBtn.upload(upload_chat_history, [current_model, uploadFileBtn], [
saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider, presence_penalty_slider,
frequency_penalty_slider, logit_bias_txt, user_identifier_txt]).then(**refresh_history_args)
historyDownloadBtn.click(None, [
user_name, historySelectList], None, _js='(a,b)=>{return downloadHistory(a,b,".json");}')
historyMarkdownDownloadBtn.click(None, [
user_name, historySelectList], None, _js='(a,b)=>{return downloadHistory(a,b,".md");}')
historySearchTextbox.input(
filter_history,
[user_name, historySearchTextbox],
[historySelectList]
)
# Advanced
temperature_slider.input(
set_temperature, [current_model, temperature_slider], None, show_progress=False)
top_p_slider.input(set_top_p, [current_model, top_p_slider], None, show_progress=False)
n_choices_slider.input(
set_n_choices, [current_model, n_choices_slider], None, show_progress=False)
stop_sequence_txt.input(
set_stop_sequence, [current_model, stop_sequence_txt], None, show_progress=False)
max_context_length_slider.input(
set_token_upper_limit, [current_model, max_context_length_slider], None, show_progress=False)
max_generation_slider.input(
set_max_tokens, [current_model, max_generation_slider], None, show_progress=False)
presence_penalty_slider.input(
set_presence_penalty, [current_model, presence_penalty_slider], None, show_progress=False)
frequency_penalty_slider.input(
set_frequency_penalty, [current_model, frequency_penalty_slider], None, show_progress=False)
logit_bias_txt.input(
set_logit_bias, [current_model, logit_bias_txt], None, show_progress=False)
user_identifier_txt.input(set_user_identifier, [
current_model, user_identifier_txt], None, show_progress=False)
default_btn.click(
reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True
)
# Invisible elements
changeSingleSessionBtn.click(
fn=lambda value: gr.Checkbox.update(value=value),
inputs=[single_turn_checkbox],
outputs=[single_turn_checkbox],
_js='(a)=>{return bgChangeSingleSession(a);}'
)
historySelectBtn.click( # This is an experimental feature... Not actually used.
fn=load_chat_history,
inputs=[current_model, historySelectList],
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
_js='(a,b)=>{return bgSelectHistory(a,b);}'
)
demo.title = CHUANHU_TITLE
if __name__ == "__main__":
reload_javascript()
setup_wizard()
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
allowed_paths=[HISTORY_DIR, assets_path],
server_name=server_name,
| # -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
"""
gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages
gr.Chatbot.postprocess = postprocess
with gr.Blocks(theme=small_and_beautiful_theme) as demo:
user_name = gr.Textbox("", visible=False)
promptTemplates = gr.State(load_template(get_template_names()[0], mode=2))
user_question = gr.State("")
assert type(my_api_key) == str
user_api_key = gr.State(my_api_key)
current_model = gr.State()
topic = gr.State(i18n("未命名对话历史记录"))
with gr.Row(elem_id="chuanhu-header"):
gr.HTML(get_html("header_title.html").format(
app_title=CHUANHU_TITLE), elem_id="app-title")
status_display = gr.Markdown(get_geoip, elem_id="status-display")
with gr.Row(elem_id="float-display"):
user_info = gr.Markdown(
value="getting user info...", elem_id="user-info")
with gr.Row(equal_height=True, elem_id="chuanhu-body"):
with gr.Column(elem_id="menu-area"):
with gr.Column(elem_id="chuanhu-history"):
with gr.Box():
with gr.Row(elem_id="chuanhu-history-header"):
with gr.Row(elem_id="chuanhu-history-search-row"):
with gr.Column(min_width=150, scale=2):
historySearchTextbox = gr.Textbox(show_label=False, container=False, placeholder=i18n(
"搜索(支持正则)..."), lines=1, elem_id="history-search-tb")
with gr.Column(min_width=52, scale=1, elem_id="gr-history-header-btns"):
uploadFileBtn = gr.UploadButton(
interactive=True, label="", file_types=[".json"], elem_id="gr-history-upload-btn")
historyRefreshBtn = gr.Button("", elem_id="gr-history-refresh-btn")
with gr.Row(elem_id="chuanhu-history-body"):
with gr.Column(scale=6, elem_id="history-select-wrap"):
historySelectList = gr.Radio(
label=i18n("从列表中加载对话"),
choices=get_history_names(),
value=get_first_history_name(),
# multiselect=False,
container=False,
elem_id="history-select-dropdown"
)
with gr.Row(visible=False):
with gr.Column(min_width=42, scale=1):
historyDeleteBtn = gr.Button(
"🗑️", elem_id="gr-history-delete-btn")
with gr.Column(min_width=42, scale=1):
historyDownloadBtn = gr.Button(
"⏬", elem_id="gr-history-download-btn")
with gr.Column(min_width=42, scale=1):
historyMarkdownDownloadBtn = gr.Button(
"⤵️", elem_id="gr-history-mardown-download-btn")
with gr.Row(visible=False):
with gr.Column(scale=6):
saveFileName = gr.Textbox(
show_label=True,
placeholder=i18n("设置文件名: 默认为.json,可选为.md"),
label=i18n("设置保存文件名"),
value=i18n("对话历史记录"),
elem_classes="no-container"
# container=False,
)
with gr.Column(scale=1):
renameHistoryBtn = gr.Button(
i18n("💾 保存对话"), elem_id="gr-history-save-btn")
exportMarkdownBtn = gr.Button(
i18n("📝 导出为 Markdown"), elem_id="gr-markdown-export-btn")
with gr.Column(elem_id="chuanhu-menu-footer"):
with gr.Row(elem_id="chuanhu-func-nav"):
gr.HTML(get_html("func_nav.html"))
# gr.HTML(get_html("footer.html").format(versions=versions_html()), elem_id="footer")
# gr.Markdown(CHUANHU_DESCRIPTION, elem_id="chuanhu-author")
with gr.Column(elem_id="chuanhu-area", scale=5):
with gr.Column(elem_id="chatbot-area"):
with gr.Row(elem_id="chatbot-header"):
model_select_dropdown = gr.Dropdown(
label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL],
interactive=True,
show_label=False, container=False, elem_id="model-select-dropdown"
)
lora_select_dropdown = gr.Dropdown(
label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True,
container=False, visible=False,
)
gr.HTML(get_html("chatbot_header_btn.html").format(
json_label=i18n("历史记录(JSON)"),
md_label=i18n("导出为 Markdown")
), elem_id="chatbot-header-btn-bar")
with gr.Row():
chatbot = gr.Chatbot(
label="ChatGPT",
elem_id="chuanhu-chatbot",
latex_delimiters=latex_delimiters_set,
sanitize_html=False,
# height=700,
show_label=False,
avatar_images=[user_avatar, bot_avatar],
show_share_button=False,
)
with gr.Row(elem_id="chatbot-footer"):
with gr.Box(elem_id="chatbot-input-box"):
with gr.Row(elem_id="chatbot-input-row"):
gr.HTML(get_html("chatbot_more.html").format(
single_turn_label=i18n("单轮对话"),
websearch_label=i18n("在线搜索"),
upload_file_label=i18n("上传文件"),
uploaded_files_label=i18n("知识库文件"),
uploaded_files_tip=i18n("在工具箱中管理知识库文件")
))
with gr.Row(elem_id="chatbot-input-tb-row"):
with gr.Column(min_width=225, scale=12):
user_input = gr.Textbox(
elem_id="user-input-tb",
show_label=False,
placeholder=i18n("在这里输入"),
elem_classes="no-container",
max_lines=5,
# container=False
)
with gr.Column(min_width=42, scale=1, elem_id="chatbot-ctrl-btns"):
submitBtn = gr.Button(
value="", variant="primary", elem_id="submit-btn")
cancelBtn = gr.Button(
value="", variant="secondary", visible=False, elem_id="cancel-btn")
# Note: Buttons below are set invisible in UI. But they are used in JS.
with gr.Row(elem_id="chatbot-buttons", visible=False):
with gr.Column(min_width=120, scale=1):
emptyBtn = gr.Button(
i18n("🧹 新的对话"), elem_id="empty-btn"
)
with gr.Column(min_width=120, scale=1):
retryBtn = gr.Button(
i18n("🔄 重新生成"), elem_id="gr-retry-btn")
with gr.Column(min_width=120, scale=1):
delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
with gr.Column(min_width=120, scale=1):
delLastBtn = gr.Button(
i18n("🗑️ 删除最新对话"), elem_id="gr-dellast-btn")
with gr.Row(visible=False) as like_dislike_area:
with gr.Column(min_width=20, scale=1):
likeBtn = gr.Button(
"👍", elem_id="gr-like-btn")
with gr.Column(min_width=20, scale=1):
dislikeBtn = gr.Button(
"👎", elem_id="gr-dislike-btn")
with gr.Column(elem_id="toolbox-area", scale=1):
            # This extra Box wrapper exists only for CSS styling; do not remove it.
with gr.Box(elem_id="chuanhu-toolbox"):
with gr.Row():
gr.Markdown("## " + i18n("工具箱"))
gr.HTML(get_html("close_btn.html").format(
obj="toolbox"), elem_classes="close-btn")
with gr.Tabs(elem_id="chuanhu-toolbox-tabs"):
with gr.Tab(label=i18n("对话")):
with gr.Accordion(label=i18n("模型"), open=not HIDE_MY_KEY, visible=not HIDE_MY_KEY):
keyTxt = gr.Textbox(
show_label=True,
placeholder=f"Your API-key...",
value=hide_middle_chars(user_api_key.value),
type="password",
visible=not HIDE_MY_KEY,
label="API-Key",
)
if multi_api_key:
usageTxt = gr.Markdown(i18n(
"多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage-display",
elem_classes="insert-block", visible=show_api_billing)
else:
usageTxt = gr.Markdown(i18n(
"**发送消息** 或 **提交key** 以显示额度"), elem_id="usage-display",
elem_classes="insert-block", visible=show_api_billing)
gr.Markdown("---", elem_classes="hr-line", visible=not HIDE_MY_KEY)
with gr.Accordion(label="Prompt", open=True):
systemPromptTxt = gr.Textbox(
show_label=True,
placeholder=i18n("在这里输入System Prompt..."),
label="System prompt",
value=INITIAL_SYSTEM_PROMPT,
lines=8
)
retain_system_prompt_checkbox = gr.Checkbox(
label=i18n("新建对话保留Prompt"), value=False, visible=True,
elem_classes="switch-checkbox")
with gr.Accordion(label=i18n("加载Prompt模板"), open=False):
with gr.Column():
with gr.Row():
with gr.Column(scale=6):
templateFileSelectDropdown = gr.Dropdown(
label=i18n("选择Prompt模板集合文件"),
choices=get_template_names(),
multiselect=False,
value=get_template_names()[0],
container=False,
)
with gr.Column(scale=1):
templateRefreshBtn = gr.Button(
i18n("🔄 刷新"))
with gr.Row():
with gr.Column():
templateSelectDropdown = gr.Dropdown(
label=i18n("从Prompt模板中加载"),
choices=load_template(
get_template_names()[
0], mode=1
),
multiselect=False,
container=False,
)
gr.Markdown("---", elem_classes="hr-line")
with gr.Accordion(label=i18n("知识库"), open=True, elem_id="gr-kb-accordion", visible=True):
use_websearch_checkbox = gr.Checkbox(label=i18n(
"使用在线搜索"), value=False, elem_classes="switch-checkbox", elem_id="gr-websearch-cb",
visible=False)
index_files = gr.Files(label=i18n(
"上传"), type="file",
file_types=[".pdf", ".docx", ".pptx", ".epub", ".xlsx", ".txt", "text", "image"],
elem_id="upload-index-file")
two_column = gr.Checkbox(label=i18n(
"双栏pdf"), value=False)
summarize_btn = gr.Button(i18n("总结"), visible=False)
with gr.Tab(label=i18n("参数")):
gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️"),
elem_id="advanced-warning")
with gr.Accordion(i18n("参数"), open=True):
temperature_slider = gr.Slider(
minimum=-0,
maximum=2.0,
value=1.0,
step=0.1,
interactive=True,
label="temperature",
)
top_p_slider = gr.Slider(
minimum=-0,
maximum=1.0,
value=1.0,
step=0.05,
interactive=True,
label="top-p",
)
n_choices_slider = gr.Slider(
minimum=1,
maximum=10,
value=1,
step=1,
interactive=True,
label="n choices",
)
stop_sequence_txt = gr.Textbox(
show_label=True,
placeholder=i18n("停止符,用英文逗号隔开..."),
label="stop",
value="",
lines=1,
)
max_context_length_slider = gr.Slider(
minimum=1,
maximum=32768,
value=2000,
step=1,
interactive=True,
label="max context",
)
max_generation_slider = gr.Slider(
minimum=1,
maximum=32768,
value=1000,
step=1,
interactive=True,
label="max generations",
)
presence_penalty_slider = gr.Slider(
minimum=-2.0,
maximum=2.0,
value=0.0,
step=0.01,
interactive=True,
label="presence penalty",
)
frequency_penalty_slider = gr.Slider(
minimum=-2.0,
maximum=2.0,
value=0.0,
step=0.01,
interactive=True,
label="frequency penalty",
)
logit_bias_txt = gr.Textbox(
show_label=True,
placeholder=f"word:likelihood",
label="logit bias",
value="",
lines=1,
)
user_identifier_txt = gr.Textbox(
show_label=True,
placeholder=i18n("用于定位滥用行为"),
label=i18n("用户标识符"),
value=user_name.value,
lines=1,
)
with gr.Tab(label=i18n("关于")):
gr.Markdown("#### " + i18n("ChatGPT WebUI"))
gr.Markdown(CHUANHU_DESCRIPTION)
with gr.Row(elem_id="popup-wrapper"):
with gr.Box(elem_id="chuanhu-popup"):
with gr.Box(elem_id="chuanhu-setting"):
with gr.Row():
gr.Markdown("## " + i18n("设置"))
gr.HTML(get_html("close_btn.html").format(
obj="box"), elem_classes="close-btn")
with gr.Tabs(elem_id="chuanhu-setting-tabs"):
with gr.Tab(label=i18n("高级")):
gr.HTML(get_html("appearance_switcher.html").format(
label=i18n("切换亮暗色主题")), elem_classes="insert-block", visible=False)
use_streaming_checkbox = gr.Checkbox(
label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION,
elem_classes="switch-checkbox"
)
language_select_dropdown = gr.Dropdown(
label=i18n("选择回复语言(针对搜索&索引功能)"),
choices=REPLY_LANGUAGES,
multiselect=False,
value=REPLY_LANGUAGES[0],
visible=False,
)
name_chat_method = gr.Dropdown(
label=i18n("对话命名方式"),
choices=HISTORY_NAME_METHODS,
multiselect=False,
interactive=True,
value=HISTORY_NAME_METHODS[chat_name_method_index],
)
single_turn_checkbox = gr.Checkbox(label=i18n(
"单轮对话"), value=False, elem_classes="switch-checkbox", elem_id="gr-single-session-cb",
visible=False)
# checkUpdateBtn = gr.Button(i18n("🔄 检查更新..."), visible=check_update)
with gr.Tab(i18n("网络")):
gr.Markdown(
i18n("⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置"),
elem_id="netsetting-warning")
default_btn = gr.Button(i18n("🔙 恢复默认网络设置"))
                        # Network proxy
proxyTxt = gr.Textbox(
show_label=True,
placeholder=i18n("未设置代理..."),
label=i18n("代理地址"),
value=http_proxy,
lines=1,
interactive=False,
# container=False,
elem_classes="view-only-textbox no-container",
)
# changeProxyBtn = gr.Button(i18n("🔄 设置代理地址"))
                        # Show the user-configured api_host first if one is set
apihostTxt = gr.Textbox(
show_label=True,
placeholder="api.openai.com",
label="OpenAI API-Host",
value=api_host or API_HOST,
lines=1,
interactive=False,
# container=False,
elem_classes="view-only-textbox no-container",
)
with gr.Tab(label=i18n("关于"), elem_id="about-tab"):
gr.Markdown("# " + i18n("ChatGPT WebUI"))
gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
with gr.Box(elem_id="web-config", visible=False):
gr.HTML(get_html('web_config.html').format(
enableCheckUpdate_config=False,
hideHistoryWhenNotLoggedIn_config=hide_history_when_not_logged_in,
forView_i18n=i18n("仅供查看"),
deleteConfirm_i18n_pref=i18n("你真的要删除 "),
deleteConfirm_i18n_suff=i18n(" 吗?"),
usingLatest_i18n=i18n("您使用的就是最新版!"),
updatingMsg_i18n=i18n("正在尝试更新..."),
updateSuccess_i18n=i18n("更新成功,请重启本程序"),
updateFailure_i18n=i18n(
"更新失败,请尝试[手动更新](https://github.com/shibing624/chatgpt-webui/"),
regenerate_i18n=i18n("重新生成"),
deleteRound_i18n=i18n("删除这轮问答"),
renameChat_i18n=i18n("重命名该对话"),
validFileName_i18n=i18n("请输入有效的文件名,不要包含以下特殊字符:"),
clearFileHistoryMsg_i18n=i18n("⚠️请先删除知识库中的历史文件,再尝试上传!"),
dropUploadMsg_i18n=i18n("释放文件以上传"),
))
with gr.Box(elem_id="fake-gradio-components", visible=False):
changeSingleSessionBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="change-single-session-btn")
historySelectBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="history-select-btn") # Not used
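    # Runs once per page load (via demo.load below): resolves the logged-in user, builds the default model,
    # and restores the most recent chat history when allowed.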
def create_greeting(request: gr.Request):
if hasattr(request, "username") and request.username:
logger.info(f"Get User Name: {request.username}")
user_info, user_name = gr.Markdown.update(
value=f"User: {request.username}"), request.username
else:
user_info, user_name = gr.Markdown.update(
value=f"", visible=False), ""
current_model = get_model(
model_name=MODELS[DEFAULT_MODEL], access_key=my_api_key, user_name=user_name)[0]
if not hide_history_when_not_logged_in or user_name:
loaded_stuff = current_model.auto_load()
else:
loaded_stuff = [gr.update(), gr.update(), gr.Chatbot.update(label=MODELS[DEFAULT_MODEL]),
current_model.single_turn, current_model.temperature, current_model.top_p,
current_model.n_choices, current_model.stop_sequence, current_model.token_upper_limit,
current_model.max_generation_token, current_model.presence_penalty,
current_model.frequency_penalty, current_model.logit_bias, current_model.user_identifier]
return user_info, user_name, current_model, toggle_like_btn_visibility(
DEFAULT_MODEL), *loaded_stuff, init_history_list(user_name)
demo.load(create_greeting, inputs=None, outputs=[
user_info, user_name, current_model, like_dislike_area, saveFileName, systemPromptTxt, chatbot,
single_turn_checkbox, temperature_slider, top_p_slider, n_choices_slider, stop_sequence_txt,
max_context_length_slider, max_generation_slider, presence_penalty_slider, frequency_penalty_slider,
logit_bias_txt, user_identifier_txt, historySelectList], api_name="load")
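    # Reusable keyword-argument bundles shared by several of the event bindings below.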
chatgpt_predict_args = dict(
fn=predict,
inputs=[
current_model,
user_question,
chatbot,
use_streaming_checkbox,
use_websearch_checkbox,
index_files,
language_select_dropdown,
],
outputs=[chatbot, status_display],
show_progress=True,
)
start_outputing_args = dict(
fn=start_outputing,
inputs=[],
outputs=[submitBtn, cancelBtn],
show_progress=True,
)
end_outputing_args = dict(
fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn]
)
reset_textbox_args = dict(
fn=reset_textbox, inputs=[], outputs=[user_input]
)
transfer_input_args = dict(
fn=transfer_input, inputs=[user_input], outputs=[
user_question, user_input, submitBtn, cancelBtn], show_progress=True
)
get_usage_args = dict(
fn=billing_info, inputs=[current_model], outputs=[
usageTxt], show_progress=False
)
load_history_from_file_args = dict(
fn=load_chat_history,
inputs=[current_model, historySelectList],
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
)
refresh_history_args = dict(
fn=get_history_list, inputs=[user_name], outputs=[historySelectList]
)
auto_name_chat_history_args = dict(
fn=auto_name_chat_history,
inputs=[current_model, name_chat_method, user_question, chatbot, single_turn_checkbox],
outputs=[historySelectList],
show_progress=False,
)
# Chatbot
cancelBtn.click(interrupt, [current_model], [])
user_input.submit(
**transfer_input_args).then(
**chatgpt_predict_args).then(
**end_outputing_args).then(
**auto_name_chat_history_args)
user_input.submit(**get_usage_args)
submitBtn.click(**transfer_input_args).then(
**chatgpt_predict_args, api_name="predict").then(
**end_outputing_args).then(
**auto_name_chat_history_args)
submitBtn.click(**get_usage_args)
index_files.upload(handle_file_upload, [current_model, index_files, chatbot, language_select_dropdown], [
index_files, chatbot, status_display])
summarize_btn.click(handle_summarize_index, [
current_model, index_files, chatbot, language_select_dropdown], [chatbot, status_display])
emptyBtn.click(
reset,
inputs=[current_model, retain_system_prompt_checkbox],
outputs=[chatbot, status_display, historySelectList, systemPromptTxt, single_turn_checkbox, temperature_slider,
top_p_slider, n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
show_progress=True,
_js='(a,b)=>{return clearChatbot(a,b);}',
)
retryBtn.click(**start_outputing_args).then(
retry,
[
current_model,
chatbot,
use_streaming_checkbox,
use_websearch_checkbox,
index_files,
language_select_dropdown,
],
[chatbot, status_display],
show_progress=True,
).then(**end_outputing_args)
retryBtn.click(**get_usage_args)
delFirstBtn.click(
delete_first_conversation,
[current_model],
[status_display],
)
delLastBtn.click(
delete_last_conversation,
[current_model, chatbot],
[chatbot, status_display],
show_progress=False
)
likeBtn.click(
like,
[current_model],
[status_display],
show_progress=False
)
dislikeBtn.click(
dislike,
[current_model],
[status_display],
show_progress=False
)
two_column.change(update_doc_config, [two_column], None)
# LLM Models
keyTxt.change(set_key, [current_model, keyTxt], [
user_api_key, status_display], api_name="set_key").then(**get_usage_args)
keyTxt.submit(**get_usage_args)
single_turn_checkbox.change(
set_single_turn, [current_model, single_turn_checkbox], None, show_progress=False)
model_select_dropdown.change(get_model,
[model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider,
top_p_slider, systemPromptTxt, user_name, current_model], [
current_model, status_display, chatbot, lora_select_dropdown, user_api_key,
keyTxt], show_progress=True, api_name="get_model")
model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [
like_dislike_area], show_progress=False)
lora_select_dropdown.change(get_model,
[model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider,
top_p_slider, systemPromptTxt, user_name, current_model],
[current_model, status_display, chatbot], show_progress=True)
# Template
systemPromptTxt.input(set_system_prompt, [
current_model, systemPromptTxt], None)
templateRefreshBtn.click(get_template_dropdown, None, [
templateFileSelectDropdown])
templateFileSelectDropdown.input(
load_template,
[templateFileSelectDropdown],
[promptTemplates, templateSelectDropdown],
show_progress=True,
)
templateSelectDropdown.change(
get_template_content,
[promptTemplates, templateSelectDropdown, systemPromptTxt],
[systemPromptTxt],
show_progress=True,
)
# S&L
renameHistoryBtn.click(
rename_chat_history,
[current_model, saveFileName, chatbot],
[historySelectList],
show_progress=True,
_js='(a,b,c,d)=>{return saveChatHistory(a,b,c,d);}'
)
exportMarkdownBtn.click(
export_markdown,
[current_model, saveFileName, chatbot],
[],
show_progress=True,
)
historyRefreshBtn.click(**refresh_history_args)
historyDeleteBtn.click(delete_chat_history, [current_model, historySelectList],
[status_display, historySelectList, chatbot],
_js='(a,b,c)=>{return showConfirmationDialog(a, b, c);}').then(
reset,
inputs=[current_model, retain_system_prompt_checkbox],
outputs=[chatbot, status_display, historySelectList, systemPromptTxt],
show_progress=True,
_js='(a,b)=>{return clearChatbot(a,b);}',
)
historySelectList.input(**load_history_from_file_args)
uploadFileBtn.upload(upload_chat_history, [current_model, uploadFileBtn], [
saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider, presence_penalty_slider,
frequency_penalty_slider, logit_bias_txt, user_identifier_txt]).then(**refresh_history_args)
historyDownloadBtn.click(None, [
user_name, historySelectList], None, _js='(a,b)=>{return downloadHistory(a,b,".json");}')
historyMarkdownDownloadBtn.click(None, [
user_name, historySelectList], None, _js='(a,b)=>{return downloadHistory(a,b,".md");}')
historySearchTextbox.input(
filter_history,
[user_name, historySearchTextbox],
[historySelectList]
)
# Advanced
temperature_slider.input(
set_temperature, [current_model, temperature_slider], None, show_progress=False)
top_p_slider.input(set_top_p, [current_model, top_p_slider], None, show_progress=False)
n_choices_slider.input(
set_n_choices, [current_model, n_choices_slider], None, show_progress=False)
stop_sequence_txt.input(
set_stop_sequence, [current_model, stop_sequence_txt], None, show_progress=False)
max_context_length_slider.input(
set_token_upper_limit, [current_model, max_context_length_slider], None, show_progress=False)
max_generation_slider.input(
set_max_tokens, [current_model, max_generation_slider], None, show_progress=False)
presence_penalty_slider.input(
set_presence_penalty, [current_model, presence_penalty_slider], None, show_progress=False)
frequency_penalty_slider.input(
set_frequency_penalty, [current_model, frequency_penalty_slider], None, show_progress=False)
logit_bias_txt.input(
set_logit_bias, [current_model, logit_bias_txt], None, show_progress=False)
user_identifier_txt.input(set_user_identifier, [
current_model, user_identifier_txt], None, show_progress=False)
default_btn.click(
reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True
)
# Invisible elements
changeSingleSessionBtn.click(
fn=lambda value: gr.Checkbox.update(value=value),
inputs=[single_turn_checkbox],
outputs=[single_turn_checkbox],
_js='(a)=>{return bgChangeSingleSession(a);}'
)
historySelectBtn.click( # This is an experimental feature... Not actually used.
fn=load_chat_history,
inputs=[current_model, historySelectList],
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
_js='(a,b)=>{return bgSelectHistory(a,b);}'
)
demo.title = CHUANHU_TITLE
if __name__ == "__main__":
reload_javascript()
setup_wizard()
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
allowed_paths=[HISTORY_DIR, assets_path],
server_name=server_name, | server_port=server_port, | 0 | 2023-12-27 12:14:26+00:00 | 12k |
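The event wiring in the cropped_code above follows one Gradio idiom throughout: argument sets are packed into plain dicts (transfer_input_args, chatgpt_predict_args, end_outputing_args, ...), splatted into .click()/.submit(), and chained with .then(). A minimal self-contained sketch of that pattern, assuming Gradio 3.x; fn_a, fn_b and the component names below are hypothetical stand-ins, not identifiers from this record:

import gradio as gr

def fn_a(text):
    # stand-in for transfer_input: move the textbox content into a hidden field and clear the input
    return text, ""

def fn_b(question):
    # stand-in for predict: produce the answer from the transferred question
    return f"echo: {question}"

with gr.Blocks() as demo:
    user_input = gr.Textbox(label="input")
    user_question = gr.Textbox(visible=False)
    answer = gr.Textbox(label="answer")
    submit = gr.Button("Send")

    step_a = dict(fn=fn_a, inputs=[user_input], outputs=[user_question, user_input])
    step_b = dict(fn=fn_b, inputs=[user_question], outputs=[answer])
    # same chaining style as submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args)
    submit.click(**step_a).then(**step_b)

demo.launch()  # illustrative only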
camenduru/DiffMorpher-hf | app.py | [
{
"identifier": "DiffMorpherPipeline",
"path": "morph_attn.py",
"snippet": "class DiffMorpherPipeline(StableDiffusionPipeline):\n\n def __init__(self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = True,\n ):\n super().__init__(vae, text_encoder, tokenizer, unet, scheduler,\n safety_checker, feature_extractor, requires_safety_checker)\n self.img0_dict = dict()\n self.img1_dict = dict()\n\n def inv_step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta=0.,\n verbose=False\n ):\n \"\"\"\n Inverse sampling for DDIM Inversion\n \"\"\"\n if verbose:\n print(\"timestep: \", timestep)\n next_step = timestep\n timestep = min(timestep - self.scheduler.config.num_train_timesteps //\n self.scheduler.num_inference_steps, 999)\n alpha_prod_t = self.scheduler.alphas_cumprod[\n timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod\n alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output\n x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir\n return x_next, pred_x0\n\n @torch.no_grad()\n def invert(\n self,\n image: torch.Tensor,\n prompt,\n num_inference_steps=50,\n num_actual_inference_steps=None,\n guidance_scale=1.,\n eta=0.0,\n **kwds):\n \"\"\"\n invert a real image into noise map with determinisc DDIM inversion\n \"\"\"\n DEVICE = torch.device(\n \"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n batch_size = image.shape[0]\n if isinstance(prompt, list):\n if batch_size == 1:\n image = image.expand(len(prompt), -1, -1, -1)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # define initial latents\n latents = self.image2latent(image)\n\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n unconditional_input = self.tokenizer(\n [\"\"] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n unconditional_embeddings = self.text_encoder(\n unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat(\n [unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # interative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n # print(\"attributes: \", self.scheduler.__dict__)\n latents_list = [latents]\n pred_x0_list = [latents]\n for i, t in enumerate(tqdm.tqdm(reversed(self.scheduler.timesteps), desc=\"DDIM Inversion\")):\n if num_actual_inference_steps is not None and i >= num_actual_inference_steps:\n continue\n\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n\n # predict the noise\n noise_pred = self.unet(\n model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * \\\n 
(noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t-1 -> x_t\n latents, pred_x0 = self.inv_step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n return latents\n\n @torch.no_grad()\n def ddim_inversion(self, latent, cond):\n timesteps = reversed(self.scheduler.timesteps)\n with torch.autocast(device_type='cuda', dtype=torch.float32):\n for i, t in enumerate(tqdm.tqdm(timesteps, desc=\"DDIM inversion\")):\n cond_batch = cond.repeat(latent.shape[0], 1, 1)\n\n alpha_prod_t = self.scheduler.alphas_cumprod[t]\n alpha_prod_t_prev = (\n self.scheduler.alphas_cumprod[timesteps[i - 1]]\n if i > 0 else self.scheduler.final_alpha_cumprod\n )\n\n mu = alpha_prod_t ** 0.5\n mu_prev = alpha_prod_t_prev ** 0.5\n sigma = (1 - alpha_prod_t) ** 0.5\n sigma_prev = (1 - alpha_prod_t_prev) ** 0.5\n\n eps = self.unet(\n latent, t, encoder_hidden_states=cond_batch).sample\n\n pred_x0 = (latent - sigma_prev * eps) / mu_prev\n latent = mu * pred_x0 + sigma * eps\n # if save_latents:\n # torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))\n # torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))\n return latent\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n ):\n \"\"\"\n predict the sample of the next step in the denoise process.\n \"\"\"\n prev_timestep = timestep - \\\n self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.scheduler.alphas_cumprod[\n prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output\n x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir\n return x_prev, pred_x0\n\n @torch.no_grad()\n def image2latent(self, image):\n DEVICE = torch.device(\n \"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if type(image) is Image:\n image = np.array(image)\n image = torch.from_numpy(image).float() / 127.5 - 1\n image = image.permute(2, 0, 1).unsqueeze(0)\n # input image density range [-1, 1]\n latents = self.vae.encode(image.to(DEVICE))['latent_dist'].mean\n latents = latents * 0.18215\n return latents\n\n @torch.no_grad()\n def latent2image(self, latents, return_type='np'):\n latents = 1 / 0.18215 * latents.detach()\n image = self.vae.decode(latents)['sample']\n if return_type == 'np':\n image = (image / 2 + 0.5).clamp(0, 1)\n image = image.cpu().permute(0, 2, 3, 1).numpy()[0]\n image = (image * 255).astype(np.uint8)\n elif return_type == \"pt\":\n image = (image / 2 + 0.5).clamp(0, 1)\n\n return image\n\n def latent2image_grad(self, latents):\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents)['sample']\n\n return image # range [-1, 1]\n\n @torch.no_grad()\n def cal_latent(self, num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha, use_lora, fix_lora=None):\n # latents = torch.cos(alpha * torch.pi / 2) * img_noise_0 + \\\n # torch.sin(alpha * torch.pi / 2) * img_noise_1\n # latents = (1 - alpha) * img_noise_0 + alpha * img_noise_1\n # latents = latents / ((1 - alpha) ** 2 + alpha ** 2)\n latents = slerp(img_noise_0, img_noise_1, alpha, self.use_adain)\n text_embeddings = (1 - alpha) * text_embeddings_0 + \\\n alpha * text_embeddings_1\n\n 
self.scheduler.set_timesteps(num_inference_steps)\n if use_lora:\n if fix_lora is not None:\n self.unet = load_lora(self.unet, lora_0, lora_1, fix_lora)\n else:\n self.unet = load_lora(self.unet, lora_0, lora_1, alpha)\n\n for i, t in enumerate(tqdm.tqdm(self.scheduler.timesteps, desc=f\"DDIM Sampler, alpha={alpha}\")):\n\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n if unconditioning is not None and isinstance(unconditioning, list):\n _, text_embeddings = text_embeddings.chunk(2)\n text_embeddings = torch.cat(\n [unconditioning[i].expand(*text_embeddings.shape), text_embeddings])\n # predict the noise\n noise_pred = self.unet(\n model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.0:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(\n 2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * \\\n (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t -> x_t-1\n # YUJUN: right now, the only difference between step here and step in scheduler\n # is that scheduler version would clamp pred_x0 between [-1,1]\n # don't know if that's gonna have huge impact\n latents = self.scheduler.step(\n noise_pred, t, latents, return_dict=False)[0]\n return latents\n\n @torch.no_grad()\n def get_text_embeddings(self, prompt, guidance_scale, neg_prompt, batch_size):\n DEVICE = torch.device(\n \"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n text_embeddings = self.text_encoder(text_input.input_ids.cuda())[0]\n\n if guidance_scale > 1.:\n if neg_prompt:\n uc_text = neg_prompt\n else:\n uc_text = \"\"\n unconditional_input = self.tokenizer(\n [uc_text] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n unconditional_embeddings = self.text_encoder(\n unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat(\n [unconditional_embeddings, text_embeddings], dim=0)\n\n return text_embeddings\n\n def __call__(\n self,\n img_0=None,\n img_1=None,\n img_path_0=None,\n img_path_1=None,\n prompt_0=\"\",\n prompt_1=\"\",\n save_lora_dir=\"./lora\",\n load_lora_path_0=None,\n load_lora_path_1=None,\n lora_steps=200,\n lora_lr=2e-4,\n lora_rank=16,\n batch_size=1,\n height=512,\n width=512,\n num_inference_steps=50,\n num_actual_inference_steps=None,\n guidance_scale=1,\n attn_beta=0,\n lamb=0.6,\n use_lora = True,\n use_adain = True,\n use_reschedule = True,\n output_path = \"./results\",\n num_frames=50,\n fix_lora=None,\n progress=tqdm,\n unconditioning=None,\n neg_prompt=None,\n **kwds):\n\n # if isinstance(prompt, list):\n # batch_size = len(prompt)\n # elif isinstance(prompt, str):\n # if batch_size > 1:\n # prompt = [prompt] * batch_size\n self.scheduler.set_timesteps(num_inference_steps)\n self.use_lora = use_lora\n self.use_adain = use_adain\n self.use_reschedule = use_reschedule\n self.output_path = output_path\n \n if img_0 is None:\n img_0 = Image.open(img_path_0).convert(\"RGB\")\n # else:\n # img_0 = Image.fromarray(img_0).convert(\"RGB\")\n \n if img_1 is None:\n img_1 = Image.open(img_path_1).convert(\"RGB\")\n # else:\n # img_1 = Image.fromarray(img_1).convert(\"RGB\")\n if self.use_lora:\n print(\"Loading lora...\")\n if not load_lora_path_0:\n\n weight_name = f\"{output_path.split('/')[-1]}_lora_0.ckpt\"\n load_lora_path_0 = save_lora_dir + \"/\" + weight_name\n if not 
os.path.exists(load_lora_path_0):\n train_lora(img_0, prompt_0, save_lora_dir, None, self.tokenizer, self.text_encoder,\n self.vae, self.unet, self.scheduler, lora_steps, lora_lr, lora_rank, weight_name=weight_name)\n print(f\"Load from {load_lora_path_0}.\")\n if load_lora_path_0.endswith(\".safetensors\"):\n lora_0 = safetensors.torch.load_file(\n load_lora_path_0, device=\"cpu\")\n else:\n lora_0 = torch.load(load_lora_path_0, map_location=\"cpu\")\n\n if not load_lora_path_1:\n weight_name = f\"{output_path.split('/')[-1]}_lora_1.ckpt\"\n load_lora_path_1 = save_lora_dir + \"/\" + weight_name\n if not os.path.exists(load_lora_path_1):\n train_lora(img_1, prompt_1, save_lora_dir, None, self.tokenizer, self.text_encoder,\n self.vae, self.unet, self.scheduler, lora_steps, lora_lr, lora_rank, weight_name=weight_name)\n print(f\"Load from {load_lora_path_1}.\")\n if load_lora_path_1.endswith(\".safetensors\"):\n lora_1 = safetensors.torch.load_file(\n load_lora_path_1, device=\"cpu\")\n else:\n lora_1 = torch.load(load_lora_path_1, map_location=\"cpu\")\n\n text_embeddings_0 = self.get_text_embeddings(\n prompt_0, guidance_scale, neg_prompt, batch_size)\n text_embeddings_1 = self.get_text_embeddings(\n prompt_1, guidance_scale, neg_prompt, batch_size)\n img_0 = get_img(img_0)\n img_1 = get_img(img_1)\n if self.use_lora:\n self.unet = load_lora(self.unet, lora_0, lora_1, 0)\n img_noise_0 = self.ddim_inversion(\n self.image2latent(img_0), text_embeddings_0)\n if self.use_lora:\n self.unet = load_lora(self.unet, lora_0, lora_1, 1)\n img_noise_1 = self.ddim_inversion(\n self.image2latent(img_1), text_embeddings_1)\n\n print(\"latents shape: \", img_noise_0.shape)\n\n def morph(alpha_list, progress, desc, save=False):\n images = []\n if attn_beta is not None:\n\n self.unet = load_lora(self.unet, lora_0, lora_1, 0 if fix_lora is None else fix_lora)\n attn_processor_dict = {}\n for k in self.unet.attn_processors.keys():\n if do_replace_attn(k):\n attn_processor_dict[k] = StoreProcessor(self.unet.attn_processors[k],\n self.img0_dict, k)\n else:\n attn_processor_dict[k] = self.unet.attn_processors[k]\n self.unet.set_attn_processor(attn_processor_dict)\n\n latents = self.cal_latent(\n num_inference_steps,\n guidance_scale,\n unconditioning,\n img_noise_0,\n img_noise_1,\n text_embeddings_0,\n text_embeddings_1,\n lora_0,\n lora_1,\n alpha_list[0],\n False,\n fix_lora\n )\n first_image = self.latent2image(latents)\n first_image = Image.fromarray(first_image)\n if save:\n first_image.save(f\"{self.output_path}/{0:02d}.png\")\n\n self.unet = load_lora(self.unet, lora_0, lora_1, 1 if fix_lora is None else fix_lora)\n attn_processor_dict = {}\n for k in self.unet.attn_processors.keys():\n if do_replace_attn(k):\n attn_processor_dict[k] = StoreProcessor(self.unet.attn_processors[k],\n self.img1_dict, k)\n else:\n attn_processor_dict[k] = self.unet.attn_processors[k]\n\n self.unet.set_attn_processor(attn_processor_dict)\n\n latents = self.cal_latent(\n num_inference_steps,\n guidance_scale,\n unconditioning,\n img_noise_0,\n img_noise_1,\n text_embeddings_0,\n text_embeddings_1,\n lora_0,\n lora_1,\n alpha_list[-1], \n False,\n fix_lora\n )\n last_image = self.latent2image(latents)\n last_image = Image.fromarray(last_image)\n if save:\n last_image.save(\n f\"{self.output_path}/{num_frames - 1:02d}.png\")\n\n for i in progress.tqdm(range(1, num_frames - 1), desc=desc):\n alpha = alpha_list[i]\n self.unet = load_lora(self.unet, lora_0, lora_1, alpha if fix_lora is None else fix_lora)\n attn_processor_dict = 
{}\n for k in self.unet.attn_processors.keys():\n if do_replace_attn(k):\n attn_processor_dict[k] = LoadProcessor(\n self.unet.attn_processors[k], k, self.img0_dict, self.img1_dict, alpha, attn_beta, lamb)\n else:\n attn_processor_dict[k] = self.unet.attn_processors[k]\n\n self.unet.set_attn_processor(attn_processor_dict)\n\n latents = self.cal_latent(\n num_inference_steps,\n guidance_scale,\n unconditioning,\n img_noise_0,\n img_noise_1,\n text_embeddings_0,\n text_embeddings_1,\n lora_0,\n lora_1,\n alpha_list[i], \n False,\n fix_lora\n )\n image = self.latent2image(latents)\n image = Image.fromarray(image)\n if save:\n image.save(f\"{self.output_path}/{i:02d}.png\")\n images.append(image)\n\n images = [first_image] + images + [last_image]\n\n else:\n for k, alpha in enumerate(alpha_list):\n\n latents = self.cal_latent(\n num_inference_steps,\n guidance_scale,\n unconditioning,\n img_noise_0,\n img_noise_1,\n text_embeddings_0,\n text_embeddings_1,\n lora_0,\n lora_1,\n alpha_list[k], \n self.use_lora,\n fix_lora\n )\n image = self.latent2image(latents)\n image = Image.fromarray(image)\n if save:\n image.save(f\"{self.output_path}/{k:02d}.png\")\n images.append(image)\n\n return images\n\n with torch.no_grad():\n if self.use_reschedule:\n alpha_scheduler = AlphaScheduler()\n alpha_list = list(torch.linspace(0, 1, num_frames))\n images_pt = morph(alpha_list, progress, \"Sampling...\", False)\n images_pt = [transforms.ToTensor()(img).unsqueeze(0)\n for img in images_pt]\n alpha_scheduler.from_imgs(images_pt)\n alpha_list = alpha_scheduler.get_list()\n print(alpha_list)\n images = morph(alpha_list, progress, \"Reschedule...\", False)\n else:\n alpha_list = list(torch.linspace(0, 1, num_frames))\n print(alpha_list)\n images = morph(alpha_list, progress, \"Sampling...\", False)\n\n return images"
},
{
"identifier": "train_lora",
"path": "lora_utils.py",
"snippet": "def train_lora(image, prompt, save_lora_dir, model_path=None, tokenizer=None, text_encoder=None, vae=None, unet=None, noise_scheduler=None, lora_steps=200, lora_lr=2e-4, lora_rank=16, weight_name=None, safe_serialization=False, progress=tqdm):\n # initialize accelerator\n accelerator = Accelerator(\n gradient_accumulation_steps=1,\n # mixed_precision='fp16'\n )\n set_seed(0)\n\n # Load the tokenizer\n if tokenizer is None:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n subfolder=\"tokenizer\",\n revision=None,\n use_fast=False,\n )\n # initialize the model\n if noise_scheduler is None:\n noise_scheduler = DDPMScheduler.from_pretrained(model_path, subfolder=\"scheduler\")\n if text_encoder is None:\n text_encoder_cls = import_model_class_from_model_name_or_path(model_path, revision=None)\n text_encoder = text_encoder_cls.from_pretrained(\n model_path, subfolder=\"text_encoder\", revision=None\n )\n if vae is None:\n vae = AutoencoderKL.from_pretrained(\n model_path, subfolder=\"vae\", revision=None\n )\n if unet is None:\n unet = UNet2DConditionModel.from_pretrained(\n model_path, subfolder=\"unet\", revision=None\n )\n\n # set device and dtype\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n vae.requires_grad_(False)\n text_encoder.requires_grad_(False)\n unet.requires_grad_(False)\n\n unet.to(device)\n vae.to(device)\n text_encoder.to(device)\n\n # initialize UNet LoRA\n unet_lora_attn_procs = {}\n for name, attn_processor in unet.attn_processors.items():\n cross_attention_dim = None if name.endswith(\"attn1.processor\") else unet.config.cross_attention_dim\n if name.startswith(\"mid_block\"):\n hidden_size = unet.config.block_out_channels[-1]\n elif name.startswith(\"up_blocks\"):\n block_id = int(name[len(\"up_blocks.\")])\n hidden_size = list(reversed(unet.config.block_out_channels))[block_id]\n elif name.startswith(\"down_blocks\"):\n block_id = int(name[len(\"down_blocks.\")])\n hidden_size = unet.config.block_out_channels[block_id]\n else:\n raise NotImplementedError(\"name must start with up_blocks, mid_blocks, or down_blocks\")\n\n if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)):\n lora_attn_processor_class = LoRAAttnAddedKVProcessor\n else:\n lora_attn_processor_class = (\n LoRAAttnProcessor2_0 if hasattr(F, \"scaled_dot_product_attention\") else LoRAAttnProcessor\n )\n unet_lora_attn_procs[name] = lora_attn_processor_class(\n hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=lora_rank\n )\n unet.set_attn_processor(unet_lora_attn_procs)\n unet_lora_layers = AttnProcsLayers(unet.attn_processors)\n\n # Optimizer creation\n params_to_optimize = (unet_lora_layers.parameters())\n optimizer = torch.optim.AdamW(\n params_to_optimize,\n lr=lora_lr,\n betas=(0.9, 0.999),\n weight_decay=1e-2,\n eps=1e-08,\n )\n\n lr_scheduler = get_scheduler(\n \"constant\",\n optimizer=optimizer,\n num_warmup_steps=0,\n num_training_steps=lora_steps,\n num_cycles=1,\n power=1.0,\n )\n\n # prepare accelerator\n unet_lora_layers = accelerator.prepare_model(unet_lora_layers)\n optimizer = accelerator.prepare_optimizer(optimizer)\n lr_scheduler = accelerator.prepare_scheduler(lr_scheduler)\n\n # initialize text embeddings\n with torch.no_grad():\n text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None)\n text_embedding = encode_prompt(\n text_encoder,\n text_inputs.input_ids,\n text_inputs.attention_mask,\n 
text_encoder_use_attention_mask=False\n )\n\n if type(image) == np.ndarray:\n image = Image.fromarray(image)\n \n # initialize latent distribution\n image_transforms = transforms.Compose(\n [\n transforms.Resize(512, interpolation=transforms.InterpolationMode.BILINEAR),\n # transforms.RandomCrop(512),\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5]),\n ]\n )\n\n image = image_transforms(image).to(device)\n image = image.unsqueeze(dim=0)\n \n latents_dist = vae.encode(image).latent_dist\n for _ in progress.tqdm(range(lora_steps), desc=\"Training LoRA...\"):\n unet.train()\n model_input = latents_dist.sample() * vae.config.scaling_factor\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(model_input)\n bsz, channels, height, width = model_input.shape\n # Sample a random timestep for each image\n timesteps = torch.randint(\n 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device\n )\n timesteps = timesteps.long()\n\n # Add noise to the model input according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)\n\n # Predict the noise residual\n model_pred = unet(noisy_model_input, timesteps, text_embedding).sample\n\n # Get the target for loss depending on the prediction type\n if noise_scheduler.config.prediction_type == \"epsilon\":\n target = noise\n elif noise_scheduler.config.prediction_type == \"v_prediction\":\n target = noise_scheduler.get_velocity(model_input, noise, timesteps)\n else:\n raise ValueError(f\"Unknown prediction type {noise_scheduler.config.prediction_type}\")\n\n loss = F.mse_loss(model_pred.float(), target.float(), reduction=\"mean\")\n accelerator.backward(loss)\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n\n # save the trained lora\n # unet = unet.to(torch.float32)\n # vae = vae.to(torch.float32)\n # text_encoder = text_encoder.to(torch.float32)\n\n # unwrap_model is used to remove all special modules added when doing distributed training\n # so here, there is no need to call unwrap_model\n # unet_lora_layers = accelerator.unwrap_model(unet_lora_layers)\n LoraLoaderMixin.save_lora_weights(\n save_directory=save_lora_dir,\n unet_lora_layers=unet_lora_layers,\n text_encoder_lora_layers=None,\n weight_name=weight_name,\n safe_serialization=safe_serialization\n )"
}
] | import os
import torch
import numpy as np
import cv2
import gradio as gr
from PIL import Image
from datetime import datetime
from morph_attn import DiffMorpherPipeline
from lora_utils import train_lora | 7292 |
LENGTH=450
def train_lora_interface(
image,
prompt,
model_path,
output_path,
lora_steps,
lora_rank,
lora_lr,
num
):
os.makedirs(output_path, exist_ok=True)
train_lora(image, prompt, output_path, model_path,
lora_steps=lora_steps, lora_lr=lora_lr, lora_rank=lora_rank, weight_name=f"lora_{num}.ckpt", progress=gr.Progress())
return f"Train LoRA {'A' if num == 0 else 'B'} Done!"
def run_diffmorpher(
image_0,
image_1,
prompt_0,
prompt_1,
model_path,
lora_mode,
lamb,
use_adain,
use_reschedule,
num_frames,
fps,
load_lora_path_0,
load_lora_path_1,
output_path
):
run_id = datetime.now().strftime("%H%M") + "_" + datetime.now().strftime("%Y%m%d")
os.makedirs(output_path, exist_ok=True)
|
LENGTH=450
def train_lora_interface(
image,
prompt,
model_path,
output_path,
lora_steps,
lora_rank,
lora_lr,
num
):
os.makedirs(output_path, exist_ok=True)
train_lora(image, prompt, output_path, model_path,
lora_steps=lora_steps, lora_lr=lora_lr, lora_rank=lora_rank, weight_name=f"lora_{num}.ckpt", progress=gr.Progress())
return f"Train LoRA {'A' if num == 0 else 'B'} Done!"
def run_diffmorpher(
image_0,
image_1,
prompt_0,
prompt_1,
model_path,
lora_mode,
lamb,
use_adain,
use_reschedule,
num_frames,
fps,
load_lora_path_0,
load_lora_path_1,
output_path
):
run_id = datetime.now().strftime("%H%M") + "_" + datetime.now().strftime("%Y%m%d")
os.makedirs(output_path, exist_ok=True) | morpher_pipeline = DiffMorpherPipeline.from_pretrained(model_path, torch_dtype=torch.float32).to("cuda") | 0 | 2023-12-25 04:51:41+00:00 | 12k |
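The next_line field of this record shows the ground-truth continuation: run_diffmorpher instantiates DiffMorpherPipeline. A plausible rest of the routine, sketched only from the __call__ signature in the context snippet and assuming the record's imports (torch, cv2, numpy, gradio); the helper name render_morph_video and the exact argument handling are assumptions, not code from the original repo:

def render_morph_video(image_0, image_1, prompt_0, prompt_1, model_path,
                       lamb, use_adain, use_reschedule, num_frames, fps,
                       load_lora_path_0, load_lora_path_1, output_path, run_id):
    # Build the pipeline exactly as the gold next_line does.
    pipeline = DiffMorpherPipeline.from_pretrained(model_path, torch_dtype=torch.float32).to("cuda")
    # Generate the interpolation frames; keyword names mirror DiffMorpherPipeline.__call__.
    frames = pipeline(
        img_0=image_0, img_1=image_1,
        prompt_0=prompt_0, prompt_1=prompt_1,
        load_lora_path_0=load_lora_path_0, load_lora_path_1=load_lora_path_1,
        lamb=lamb, use_adain=use_adain, use_reschedule=use_reschedule,
        num_frames=num_frames, save_lora_dir=output_path, output_path=output_path,
    )
    # Encode the returned PIL frames into an mp4 with OpenCV.
    video_path = f"{output_path}/{run_id}.mp4"
    writer = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, frames[0].size)
    for frame in frames:
        writer.write(cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR))
    writer.release()
    return video_path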
camenduru/AnyDoor-online-hf | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "default",
"path": "ldm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "ismap",
"path": "ldm/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "isimage",
"path": "ldm/util.py",
"snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)"
},
{
"identifier": "mean_flat",
"path": "ldm/util.py",
"snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))"
},
{
"identifier": "count_params",
"path": "ldm/util.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "LitEma",
"path": "ldm/modules/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "normal_kl",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean"
},
{
"identifier": "IdentityFirstStage",
"path": "ldm/models/autoencoder.py",
"snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "AutoencoderKL",
"path": "ldm/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. < ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, 
on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x"
},
{
"identifier": "make_beta_schedule",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "extract_into_tensor",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "noise_like",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "DDIMSampler",
"path": "ldm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec"
}
] | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler | 9,748 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key) | count_params(self.model, verbose=True) | 6 | 2023-12-25 04:48:34+00:00 | 12k |
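The DDIM record above condenses each reverse step into three moves that appear verbatim in p_sample_ddim: predict x_0 from the current noise estimate, form the "direction pointing to x_t", and add sigma-scaled noise. Below is a minimal, self-contained sketch of that single update written against the same quantities (a_t, a_prev, sigma_t as per-step tensors broadcastable against x); the function name and signature are illustrative only and are not part of the repository's API.

import torch

@torch.no_grad()
def ddim_step(x, e_t, a_t, a_prev, sigma_t):
    """One DDIM update mirroring p_sample_ddim in the snippet above.

    x       : current latent x_t, shape (b, c, h, w)
    e_t     : predicted noise eps(x_t, t), same shape as x
    a_t     : cumulative alpha at the current timestep, shape (b, 1, 1, 1)
    a_prev  : cumulative alpha at the previous (earlier) timestep
    sigma_t : DDIM sigma; 0 gives the fully deterministic sampler
    """
    # current prediction for x_0
    pred_x0 = (x - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()
    # direction pointing back to x_t
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
    # stochastic part; vanishes when sigma_t == 0
    noise = sigma_t * torch.randn_like(x)
    x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
    return x_prev, pred_x0

With sigma_t set to zero the noise term drops out and the update is fully deterministic; the temperature and noise_dropout arguments seen in the record simply rescale or randomly mask that same noise term.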
pangxincheng/TaskManager | task_manager/main.py | [
{
"identifier": "CoreManager",
"path": "task_manager/manager/core.py",
"snippet": "class CoreManager(mp.Process):\n\n def __init__(\n self,\n core_manager_addr: str,\n gpu_manager_addr: str=\"ipc://gpu_manager\",\n task_manager_addr: str=\"ipc://task_manager\",\n log_dir: str=\"logs\",\n log_level: str=\"INFO\",\n ) -> None:\n mp.Process.__init__(self)\n assert core_manager_addr.startswith(\"tcp://\") or core_manager_addr.startswith(\"ipc://\"), \\\n \"core manager address must start with tcp:// or ipc://\"\n assert gpu_manager_addr.startswith(\"tcp://\") or gpu_manager_addr.startswith(\"ipc://\"), \\\n \"gpu manager address must start with tcp:// or ipc://\"\n assert task_manager_addr.startswith(\"tcp://\") or task_manager_addr.startswith(\"ipc://\"), \\\n \"task manager address must start with tcp:// or ipc://\"\n self.core_manager_addr = core_manager_addr\n self.gpu_manager_addr = gpu_manager_addr\n self.task_manager_addr = task_manager_addr\n self.log_dir = log_dir\n self.log_level = log_level\n\n def _init_manager(self) -> None:\n\n self.logger = common_utils.get_logger(\n logger_name=\"core_manager\",\n log_level=self.log_level,\n handler=os.path.join(self.log_dir, \"core_manager.log\")\n )\n\n self.logger.info(f\"CoreManager is listening on {self.core_manager_addr}\")\n self._core_manager = zmq_utils.ZMQServer(\n addr=self.core_manager_addr,\n )\n time.sleep(1)\n\n self.logger.info(f\"GPUManager is listening on {self.gpu_manager_addr}\")\n self._gpu_manager = zmq_utils.ZMQServer(\n addr=self.gpu_manager_addr,\n )\n\n self.logger.info(f\"TaskManager is listening on {self.task_manager_addr}\")\n self._task_manager = zmq_utils.ZMQServer(\n addr=self.task_manager_addr,\n )\n\n self.watched_gpus = {}\n self.watched_tasks = {}\n\n pycuda_drv.init()\n self.running = True\n\n def run(self) -> None:\n self._init_manager()\n while self.running:\n identity, msg = self._core_manager.recv_binary()\n command = common_utils.byte_msg_to_dict(msg)\n self.logger.info(f\"receive command to call {command['function']}\")\n return_msg = self.exception_wrapper(\n fn=getattr(self, command[\"function\"], self._default_fn),\n *command.get(\"args\", {}),\n **command.get(\"kwargs\", {})\n )\n self._core_manager.send_binary(\n any=common_utils.dict_to_byte_msg(return_msg),\n identity=identity\n )\n\n def exception_wrapper(self, fn, *args, **kwargs) -> Dict[str, Any]:\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n self.logger.error(f\"Exception when call {fn.__name__}\")\n self.logger.exception(e)\n return {\n \"status\": 400,\n \"result\": f\"Exception when call {fn.__name__}, the excption is \" + str(e)\n }\n \n def _default_fn(self, *args, **kwargs) -> None:\n raise NotImplementedError(\"This function is not implemented\")\n\n def exit(self) -> Dict[str, Any]:\n self.logger.info(\"=> [info] exit core server...\")\n self.running = False\n return_msg = {\n \"watched_gpus\": {},\n \"watched_tasks\": {}\n }\n for identity in self.watched_gpus.keys():\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"exit\"\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[\"watched_gpus\"][identity] = msg\n for identity in self.watched_tasks.keys():\n self._task_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"exit\"\n }),\n identity=identity\n )\n identity_, msg = self._task_manager.recv_binary()\n identity_ = 
identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[\"watched_tasks\"][identity] = msg\n return {\n \"status\": 200,\n \"result\": {\n \"msg\": \"👋bye~\",\n \"watched_gpus\": return_msg\n }\n }\n\n def get_gpus_info_by_identities(self, identities: List[str], info_level: str=\"simple\") -> Dict[str, Any]:\n if len(identities) == 0:\n identities = list(self.watched_gpus.keys())\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n\n return_msg = {}\n for identity in identities:\n if identity not in self.watched_gpus.keys():\n return_msg[identity] = {\n \"status\": 400,\n \"result\": f\"Could not find a watch dog with identity {identity}\"\n }\n else:\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"get_gpu_info\",\n \"kwargs\": {\n \"info_level\": info_level\n }\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def get_gpus_info_by_device_ids(self, device_ids: List[int], info_level: str=\"simple\") -> Dict[str, Any]:\n if len(device_ids) == 0:\n device_ids = list(range(pycuda_drv.Device.count()))\n assert len(device_ids) == len(set(device_ids)), \"device_ids should not contain duplicate elements\"\n assert all([device_id >= 0 and device_id < pycuda_drv.Device.count() for device_id in device_ids]), \\\n \"The device_id should be in the valid range\"\n watched_gpu_device_ids = {\n self.watched_gpus[identity][\"device_id\"]: identity\n for identity in self.watched_gpus.keys()\n if self.watched_gpus[identity][\"device_id\"] in device_ids\n }\n unwatched_gpus = sorted(list(set(device_ids) - watched_gpu_device_ids.keys()))\n\n return_msg = {}\n for device_id in watched_gpu_device_ids.keys():\n identity = watched_gpu_device_ids[device_id]\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"get_gpu_info\",\n \"kwargs\": {\n \"info_level\": info_level\n }\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n \n return_msg[\"unwatched\"] = []\n for device_id in unwatched_gpus:\n gpu_device = pycuda_drv.Device(device_id)\n device_msg = {\n \"device_id\": device_id,\n \"device_name\": gpu_device.name(),\n \"total_memory\": common_utils.fmt_bytes(gpu_device.total_memory()),\n \"compute_capability\": float(\"%d.%d\" % gpu_device.compute_capability()),\n }\n if info_level != \"simple\":\n device_attributes_tuples = gpu_device.get_attributes().items()\n device_attributes = {}\n\n for k, v in device_attributes_tuples:\n device_attributes[str(k)] = v\n device_msg[\"device_attributes\"] = device_attributes\n return_msg[\"unwatched\"].append(device_msg)\n \n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def start_watch_dog_by_device_ids(self, device_ids: List[int]) -> Dict[str, Any]:\n assert len(device_ids) > 0, \"device_ids should not be empty\"\n assert len(device_ids) == len(set(device_ids)), \"device_ids should not contain duplicate elements\"\n assert all([device_id >= 0 and device_id < pycuda_drv.Device.count() for device_id in 
device_ids]), \\\n \"The device_id should be in the valid range\"\n watched_gpu_device_ids = {\n self.watched_gpus[identity][\"device_id\"]: identity\n for identity in self.watched_gpus.keys()\n if self.watched_gpus[identity][\"device_id\"] in device_ids\n }\n return_msg = {}\n for device_id in device_ids:\n if device_id in watched_gpu_device_ids.keys():\n return_msg[watched_gpu_device_ids[device_id]] = {\n \"status\": 400,\n \"result\": f\"GPU{device_id} is already being watched by {watched_gpu_device_ids[device_id]}\"\n }\n else:\n timestamp = str(time.time())\n identity = common_utils.md5(f\"watch_dog_{device_id}_{timestamp}\")\n watchdog = GPUManager(\n identity=identity,\n device_id=device_id,\n gpu_manager_addr=self.gpu_manager_addr,\n )\n watchdog.start()\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n self.watched_gpus[identity] = {\n \"device_id\": device_id,\n \"watchdog\": watchdog,\n \"timestamp\": timestamp\n }\n return_msg[identity] = msg\n \n return {\n \"status\": 200,\n \"result\": return_msg\n }\n\n def stop_watch_dog_by_identities(self, identities: List[str]) -> Dict[str, Any]:\n if len(identities) == 0:\n identities = list(self.watched_gpus.keys())\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n \n return_msg = {}\n for identity in identities:\n if identity not in self.watched_gpus.keys():\n return_msg[identity] = {\n \"status\": 400,\n \"result\": f\"Could not find a watch dog with identity {identity}\"\n }\n else:\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"exit\",\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n self.watched_gpus[identity][\"watchdog\"].join()\n del self.watched_gpus[identity]\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def mem_alloc_by_identities(self, identities: List[str], chunk_sizes: List[int], max_sizes: List[int], units: List[str]) -> Dict[str, Any]:\n assert len(identities) == len(chunk_sizes) == len(max_sizes) == len(units), \"The lengths of identities, chunk_sizes, max_sizes and units should be equal\"\n assert all([unit in [\"B\", \"KiB\", \"MiB\", \"GiB\"] for unit in units]), \"The unit should be one of B, KiB, MiB and GiB\"\n assert all([chunk_size > 0 for chunk_size in chunk_sizes]), \"The chunk_size should be positive\"\n assert all([max_size > 0 for max_size in max_sizes]), \"The max_size should be positive\"\n assert all([chunk_size <= max_size for chunk_size, max_size in zip(chunk_sizes, max_sizes)]), \"The chunk_size should be less than or equal to max_size\"\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert all([identity in self.watched_gpus.keys() for identity in identities]), \"The identity should be in the valid range\"\n return_msg = {}\n for identity, chunk_size, max_size, unit in zip(identities, chunk_sizes, max_sizes, units):\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"mem_alloc\",\n \"kwargs\": {\n \"chunk_size\": chunk_size,\n \"max_size\": max_size,\n \"unit\": unit,\n }\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n 
identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def mem_release_by_identities(self, identities: List[str], mem_sizes: List[int], units: List[str]) -> Dict[str, Any]:\n if len(identities) == 0:\n identities = list(self.watched_gpus.keys())\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert len(identities) == len(mem_sizes) == len(units), \"The lengths of identities, mem_sizes and units should be equal\"\n assert all([identity in self.watched_gpus.keys() for identity in identities]), \"The identity should be in the valid range\"\n assert all([mem_size > 0 for mem_size in mem_sizes]), \"The mem_size should be positive\"\n assert all([unit in [\"B\", \"KiB\", \"MiB\", \"GiB\"] for unit in units]), \"The unit should be one of B, KiB, MiB and GiB\"\n return_msg = {}\n for identity, mem_size, unit in zip(identities, mem_sizes, units):\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"mem_release\",\n \"kwargs\": {\n \"mem_size\": mem_size,\n \"unit\": unit,\n }\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def start_preemptive_by_identities(self, identities, chunk_sizes: List[int], max_sizes: List[int], units: List[str], auto_close: bool=False) -> Dict[str, Any]:\n assert len(identities) == len(chunk_sizes) == len(max_sizes) == len(units), \"The lengths of identities, chunk_sizes, max_sizes and units should be equal\"\n assert all([unit in [\"B\", \"KiB\", \"MiB\", \"GiB\"] for unit in units]), \"The unit should be one of B, KiB, MiB and GiB\"\n assert all([chunk_size > 0 for chunk_size in chunk_sizes]), \"The chunk_size should be positive\"\n assert all([max_size > 0 for max_size in max_sizes]), \"The max_size should be positive\"\n assert all([chunk_size <= max_size for chunk_size, max_size in zip(chunk_sizes, max_sizes)]), \"The chunk_size should be less than or equal to max_size\"\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert all([identity in self.watched_gpus.keys() for identity in identities]), \"The identity should be in the valid range\"\n return_msg = {}\n for identity, chunk_size, max_size, unit in zip(identities, chunk_sizes, max_sizes, units):\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"start_preemptive\",\n \"kwargs\": {\n \"chunk_size\": chunk_size,\n \"max_size\": max_size,\n \"unit\": unit,\n \"auto_close\": auto_close,\n }\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n \n def stop_preemptive_by_identities(self, identities: List[str]) -> Dict[str, Any]:\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert all([identity in self.watched_gpus.keys() for identity in identities]), \"The identity should be in the valid 
range\"\n return_msg = {}\n for identity in identities:\n self._gpu_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"stop_preemptive\",\n }),\n identity=identity\n )\n identity_, msg = self._gpu_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n\n def add_task(self, user_args: List[str], stdout_file: str, stderr_file: str) -> Dict[str, Any]:\n assert len(user_args) > 0, \"user_args should not be empty\"\n timestamp = str(time.time())\n identity = common_utils.md5(\"_\".join(user_args) + timestamp)\n watchdog = TaskManager(\n identity=identity,\n core_manager_addr=self.core_manager_addr,\n task_manager_addr=self.task_manager_addr,\n user_args=user_args,\n stdout_file=stdout_file,\n stderr_file=stderr_file\n )\n watchdog.start()\n identity_, msg = self._task_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n self.watched_tasks[identity] = {\n \"watchdog\": watchdog,\n \"user_args\": user_args,\n \"timestamp\": timestamp,\n }\n return {\n \"status\": 200,\n \"result\": {\n identity: msg\n }\n }\n\n def remove_task_by_task_daemon(self, identity: str, msg: str, return_code: int) -> Dict[str, Any]:\n assert identity in self.watched_tasks.keys(), \"The identity should be in the valid range\"\n self._task_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"exit\",\n }),\n identity=identity\n )\n identity_, msg = self._task_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n self.watched_tasks[identity][\"watchdog\"].join()\n del self.watched_tasks[identity]\n return {\n \"status\": 200,\n \"result\": {\n \"identity\": identity,\n \"msg\": msg,\n \"return_code\": return_code,\n }\n }\n\n def remove_tasks(self, identities: List[str]) -> Dict[str, Any]:\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert all([identity in self.watched_tasks.keys() for identity in identities]), \"The identity should be in the valid range\"\n return_msg = {}\n for identity in identities:\n self._task_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"exit\",\n }),\n identity=identity\n )\n identity_, msg = self._task_manager.recv_binary()\n identity_ = identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n return_msg[identity] = msg\n self.watched_tasks[identity][\"watchdog\"].join()\n del self.watched_tasks[identity]\n return {\n \"status\": 200,\n \"result\": return_msg\n }\n\n def get_task_info_by_identities(self, identities: List[str]) -> Dict[str, Any]:\n if len(identities) == 0:\n identities = list(self.watched_tasks.keys())\n assert len(identities) == len(set(identities)), \"identities should not contain duplicate elements\"\n assert all([identity in self.watched_tasks.keys() for identity in identities]), \"The identity should be in the valid range\"\n return_msg = {}\n for identity in identities:\n self._task_manager.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"get_status\",\n }),\n identity=identity\n )\n identity_, msg = self._task_manager.recv_binary()\n identity_ 
= identity_.decode(\"utf-8\")\n msg = common_utils.byte_msg_to_dict(msg)\n assert identity == identity_, \"identity mismatch\"\n msg[\"result\"][\"user_args\"] = self.watched_tasks[identity][\"user_args\"]\n msg[\"result\"][\"timestamp\"] = self.watched_tasks[identity][\"timestamp\"]\n return_msg[identity] = msg\n return {\n \"status\": 200,\n \"result\": return_msg\n }"
},
{
"identifier": "CLIController",
"path": "task_manager/controller/cli_controller.py",
"snippet": "class CLIController(cmd2.Cmd):\n\n def __init__(\n self,\n core_manager_addr: str,\n log_dir: str=\"logs\",\n log_level: str=\"INFO\",\n ):\n super().__init__()\n self.prompt = \"(🚀task_manager)> \"\n self.core_manager_addr = core_manager_addr\n self.log_dir = log_dir\n self.log_level = log_level\n self.logger = None\n self.client = None\n self.identity = \"cli_controller\"\n\n self._init_controller()\n\n def _init_controller(self):\n self.logger = common_utils.get_logger(\n logger_name=\"cli_controller\",\n log_level=self.log_level,\n handler=os.path.join(self.log_dir, \"cli_controller.log\")\n )\n\n self.logger.info(\"init core client\")\n self.client = zmq_utils.ZMQClient(\n addr=self.core_manager_addr,\n identity=self.identity\n )\n time.sleep(1)\n\n @cmd2.with_argparser(cmd2.Cmd2ArgumentParser())\n def do_exit(self, args):\n \"\"\"Exit the application.\"\"\"\n self.logger.info(\"=> [info] exit cli server...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"exit\",\n \"kwargs\": {},\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n self.client.close()\n return True\n\n ggibi_parser = cmd2.Cmd2ArgumentParser()\n ggibi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", default=[], help=\"identities\")\n ggibi_parser.add_argument(\"--info_level\", type=str, default=\"simple\", help=\"simple or detail\", choices=[\"simple\", \"detail\"])\n @cmd2.with_argparser(ggibi_parser)\n def do_get_gpus_info_by_identities(self, args):\n \"\"\"Get gpu information by identities.\"\"\"\n self.logger.info(\"=> [info] get gpu information by identities...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"get_gpus_info_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n \"info_level\": args.info_level\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n ggibdi_parser = cmd2.Cmd2ArgumentParser()\n ggibdi_parser.add_argument(\"--device_ids\", type=int, nargs=\"+\", default=[], help=\"device ids\")\n ggibdi_parser.add_argument(\"--info_level\", type=str, default=\"simple\", help=\"simple or detail\", choices=[\"simple\", \"detail\"])\n @cmd2.with_argparser(ggibdi_parser)\n def do_get_gpus_info_by_device_ids(self, args):\n \"\"\"Get gpu information.\"\"\"\n self.logger.info(\"=> [info] get gpu information...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"get_gpus_info_by_device_ids\",\n \"kwargs\": {\n \"device_ids\": args.device_ids,\n \"info_level\": args.info_level\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n start_wdbdi_parser = cmd2.Cmd2ArgumentParser()\n start_wdbdi_parser.add_argument(\"--device_ids\", type=int, nargs=\"+\", help=\"device ids\")\n @cmd2.with_argparser(start_wdbdi_parser)\n def do_start_watch_dog_by_device_ids(self, args):\n \"\"\"Start watch dog by device ids.\"\"\"\n self.logger.info(\"=> [info] start watch dog by device ids...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"start_watch_dog_by_device_ids\",\n \"kwargs\": {\n \"device_ids\": args.device_ids\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n stop_wdbi_parser = cmd2.Cmd2ArgumentParser()\n stop_wdbi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", default=[], help=\"identities\")\n @cmd2.with_argparser(stop_wdbi_parser)\n def 
do_stop_watch_dog_by_identities(self, args):\n \"\"\"Stop watch dog by identities.\"\"\"\n self.logger.info(\"=> [info] stop watch dog by identities...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"stop_watch_dog_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n mabi_parser = cmd2.Cmd2ArgumentParser()\n mabi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", help=\"device ids\")\n mabi_parser.add_argument(\"--chunk_sizes\", type=int, nargs=\"+\", help=\"chun size\")\n mabi_parser.add_argument(\"--max_sizes\", type=int, nargs=\"+\", help=\"max size\")\n mabi_parser.add_argument(\"--units\", type=str, nargs=\"+\", help=\"unit\", choices=[\"B\", \"KiB\", \"MiB\", \"GiB\"])\n @cmd2.with_argparser(mabi_parser)\n def do_mem_alloc_by_identities(self, args):\n \"\"\"Memory allocation by identities.\"\"\"\n self.logger.info(\"=> [info] memory allocation by identities...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"mem_alloc_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n \"chunk_sizes\": args.chunk_sizes,\n \"max_sizes\": args.max_sizes,\n \"units\": args.units,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n mrbi_parser = cmd2.Cmd2ArgumentParser()\n mrbi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", help=\"device ids\")\n mrbi_parser.add_argument(\"--mem_sizes\", type=int, nargs=\"+\", help=\"chun size\")\n mrbi_parser.add_argument(\"--units\", type=str, nargs=\"+\", help=\"unit\", choices=[\"B\", \"KiB\", \"MiB\", \"GiB\"])\n @cmd2.with_argparser(mrbi_parser)\n def do_mem_release_by_identities(self, args):\n \"\"\"Memory release by identities.\"\"\"\n self.logger.info(\"=> [info] memory release by identities...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"mem_release_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n \"mem_sizes\": args.mem_sizes,\n \"units\": args.units,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n start_pbi_parser = cmd2.Cmd2ArgumentParser()\n start_pbi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", help=\"device ids\")\n start_pbi_parser.add_argument(\"--chunk_sizes\", type=int, nargs=\"+\", help=\"chun size\")\n start_pbi_parser.add_argument(\"--max_sizes\", type=int, nargs=\"+\", help=\"max size\")\n start_pbi_parser.add_argument(\"--units\", type=str, nargs=\"+\", help=\"unit\", choices=[\"B\", \"KiB\", \"MiB\", \"GiB\"])\n start_pbi_parser.add_argument(\"--auto_close\", action=\"store_true\", help=\"auto close\")\n @cmd2.with_argparser(start_pbi_parser)\n def do_start_preemptive_by_identities(self, args):\n \"\"\"Start preemptive by identities.\"\"\"\n self.logger.info(\"=> [info] start preemptive...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"start_preemptive_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n \"chunk_sizes\": args.chunk_sizes,\n \"max_sizes\": args.max_sizes,\n \"units\": args.units,\n \"auto_close\": args.auto_close,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n stop_pbi_parser = cmd2.Cmd2ArgumentParser()\n stop_pbi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", help=\"device ids\")\n @cmd2.with_argparser(stop_pbi_parser)\n def 
do_stop_preemptive_by_identities(self, args):\n \"\"\"Stop preemptive by identities.\"\"\"\n self.logger.info(\"=> [info] stop preemptive...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"stop_preemptive_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n at_parser = cmd2.Cmd2ArgumentParser()\n at_parser.add_argument(\"--stdout_file\", type=str, completer=cmd2.Cmd.path_complete, required=True, help=\"stdout file\")\n at_parser.add_argument(\"--stderr_file\", type=str, completer=cmd2.Cmd.path_complete, default=None, help=\"stderr file\")\n at_parser.add_argument(\"user_args\", nargs=argparse.REMAINDER, completer=cmd2.Cmd.path_complete, help=\"user args\")\n @cmd2.with_argparser(at_parser)\n def do_add_task(self, args):\n \"\"\"Add task.\"\"\"\n self.logger.info(\"=> [info] add task...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"add_task\",\n \"kwargs\": {\n \"user_args\": args.user_args,\n \"stdout_file\": args.stdout_file,\n \"stderr_file\": args.stderr_file if args.stderr_file is not None else args.stdout_file,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n rt_parser = cmd2.Cmd2ArgumentParser()\n rt_parser.add_argument(\"--identities\", type=str, nargs=\"+\", default=[], help=\"task id\")\n @cmd2.with_argparser(rt_parser)\n def do_remove_tasks(self, args):\n \"\"\"Remove tasks.\"\"\"\n self.logger.info(\"=> [info] remove tasks...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"remove_tasks\",\n \"kwargs\": {\n \"identities\": args.identities,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)\n\n gtibi_parser = cmd2.Cmd2ArgumentParser()\n gtibi_parser.add_argument(\"--identities\", type=str, nargs=\"+\", default=[], help=\"task id\")\n @cmd2.with_argparser(gtibi_parser)\n def do_get_task_info_by_identities(self, args):\n \"\"\"Get task info by identities.\"\"\"\n self.logger.info(\"=> [info] Get task info by identities...\")\n self.client.send_binary(common_utils.dict_to_byte_msg({\n \"function\": \"get_task_info_by_identities\",\n \"kwargs\": {\n \"identities\": args.identities,\n },\n }))\n msg = common_utils.byte_msg_to_dict(self.client.recv_binary()[0])\n rich.print(msg)"
}
] | import os
import sys
import rich
import time
import argparse
import multiprocessing as mp
import task_manager.utils.common_utils as common_utils
from task_manager.manager.core import CoreManager
from task_manager.controller.cli_controller import CLIController | 8,187 |
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method("spawn")
else:
assert mp.get_start_method() == "spawn", "Only support spawn start method"
def parse_args():
identity_id = common_utils.md5(str(time.time()))
parser = argparse.ArgumentParser()
parser.add_argument("--log_dir", default="logs", help="Log dir")
parser.add_argument("--log_level", default="INFO", help="Log level")
parser.add_argument("--web_controller", action="store_true", help="Whether start web gui to watch GPU usage&Tasks")
parser.add_argument(
"--core_manager_addr",
type=str,
default=f"ipc:///tmp/core_manager-{identity_id}.sock",
help="Address to run Core manager on"
)
parser.add_argument(
"--gpu_manager_addr",
type=str,
default=f"ipc:///tmp/gpu_manager-{identity_id}.sock",
help="Address to run GPU manager on"
)
parser.add_argument(
"--task_manager_addr",
type=str,
default=f"ipc:///tmp/task_manager-{identity_id}.sock",
help="Address to run Task manager on"
)
args = parser.parse_args()
os.makedirs(args.log_dir, exist_ok=True)
sys.argv = sys.argv[:1]
return args
def start_core_manager(args):
core_manager = CoreManager(
core_manager_addr=args.core_manager_addr,
gpu_manager_addr=args.gpu_manager_addr,
task_manager_addr=args.task_manager_addr,
log_dir=args.log_dir,
log_level=args.log_level,
)
core_manager.start()
time.sleep(1)
return core_manager
def start_cli_controller(args):
|
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method("spawn")
else:
assert mp.get_start_method() == "spawn", "Only support spawn start method"
def parse_args():
identity_id = common_utils.md5(str(time.time()))
parser = argparse.ArgumentParser()
parser.add_argument("--log_dir", default="logs", help="Log dir")
parser.add_argument("--log_level", default="INFO", help="Log level")
parser.add_argument("--web_controller", action="store_true", help="Whether start web gui to watch GPU usage&Tasks")
parser.add_argument(
"--core_manager_addr",
type=str,
default=f"ipc:///tmp/core_manager-{identity_id}.sock",
help="Address to run Core manager on"
)
parser.add_argument(
"--gpu_manager_addr",
type=str,
default=f"ipc:///tmp/gpu_manager-{identity_id}.sock",
help="Address to run GPU manager on"
)
parser.add_argument(
"--task_manager_addr",
type=str,
default=f"ipc:///tmp/task_manager-{identity_id}.sock",
help="Address to run Task manager on"
)
args = parser.parse_args()
os.makedirs(args.log_dir, exist_ok=True)
sys.argv = sys.argv[:1]
return args
def start_core_manager(args):
core_manager = CoreManager(
core_manager_addr=args.core_manager_addr,
gpu_manager_addr=args.gpu_manager_addr,
task_manager_addr=args.task_manager_addr,
log_dir=args.log_dir,
log_level=args.log_level,
)
core_manager.start()
time.sleep(1)
return core_manager
def start_cli_controller(args): | cli_controller = CLIController( | 1 | 2023-12-30 11:47:06+00:00 | 12k |
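Both TaskManager context snippets above follow one messaging convention: a controller packs {"function": ..., "kwargs": ...} into a byte message, the manager looks the function up by name with getattr, calls it, and answers with a {"status": ..., "result": ...} dict (status 400 when the call raises). Below is a minimal sketch of that dispatch pattern in isolation; json is used as a stand-in for the repository's common_utils.dict_to_byte_msg / byte_msg_to_dict helpers, and the Dispatcher class with its ping method are illustrative names, not the actual CoreManager or its ZMQ plumbing.

import json

def dict_to_byte_msg(d):
    # stand-in for the common_utils helper of the same name
    return json.dumps(d).encode("utf-8")

def byte_msg_to_dict(b):
    return json.loads(b.decode("utf-8"))

class Dispatcher:
    """Name-based command dispatch, as in CoreManager.run / exception_wrapper."""

    def _default_fn(self, *args, **kwargs):
        raise NotImplementedError("This function is not implemented")

    def ping(self, who="world"):
        return {"status": 200, "result": f"hello, {who}"}

    def handle(self, raw_msg: bytes) -> bytes:
        command = byte_msg_to_dict(raw_msg)
        fn = getattr(self, command["function"], self._default_fn)
        try:
            reply = fn(*command.get("args", []), **command.get("kwargs", {}))
        except Exception as e:
            reply = {"status": 400, "result": f"Exception when call {fn.__name__}, " + str(e)}
        return dict_to_byte_msg(reply)

# Usage: a controller would send dict_to_byte_msg({"function": "ping", "kwargs": {"who": "gpu0"}})
# and the manager loop would reply with handle(msg). In the repository the same round trip
# runs over ZeroMQ sockets with per-process identities, which is omitted here.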
Shaokang-Agent/S2L | marlgrid/envs/goalcycle.py | [
{
"identifier": "MultiGridEnv",
"path": "marlgrid/base.py",
"snippet": "class MultiGridEnv(gym.Env):\n def __init__(\n self,\n agents = [],\n grid_size=None,\n width=None,\n height=None,\n max_steps=100,\n reward_decay=True,\n seed=1337,\n respawn=False,\n ghost_mode=True,\n agent_spawn_kwargs = {}\n ):\n\n if grid_size is not None:\n assert width == None and height == None\n width, height = grid_size, grid_size\n\n self.respawn = respawn\n\n self.window = None\n\n self.width = width\n self.height = height\n self.max_steps = max_steps\n self.reward_decay = reward_decay\n self.seed(seed=seed)\n self.agent_spawn_kwargs = agent_spawn_kwargs\n self.ghost_mode = ghost_mode\n\n self.agents = []\n for agent in agents:\n self.add_agent(agent)\n\n self.reset()\n\n def seed(self, seed=1337):\n # Seed the random number generator\n self.np_random, _ = gym.utils.seeding.np_random(seed)\n return [seed]\n\n @property\n def action_space(self):\n return gym.spaces.Tuple(\n [agent.action_space for agent in self.agents]\n )\n\n @property\n def observation_space(self):\n return gym.spaces.Tuple(\n [agent.observation_space for agent in self.agents]\n )\n\n @property\n def num_agents(self):\n return len(self.agents)\n \n def add_agent(self, agent_interface):\n if isinstance(agent_interface, dict):\n self.agents.append(GridAgentInterface(**agent_interface))\n elif isinstance(agent_interface, GridAgentInterface):\n self.agents.append(agent_interface)\n else:\n raise ValueError(\n \"To add an agent to a marlgrid environment, call add_agent with either a GridAgentInterface object \"\n \" or a dictionary that can be used to initialize one.\")\n\n def reset(self, **kwargs):\n for agent in self.agents:\n agent.agents = []\n agent.reset(new_episode=True)\n\n self._gen_grid(self.width, self.height)\n\n for agent in self.agents:\n if agent.spawn_delay == 0:\n self.place_obj(agent, **self.agent_spawn_kwargs)\n agent.activate()\n\n self.step_count = 0\n obs = self.gen_obs()\n return obs\n\n def gen_obs_grid(self, agent):\n # If the agent is inactive, return an empty grid and a visibility mask that hides everything.\n if not agent.active:\n # below, not sure orientation is correct but as of 6/27/2020 that doesn't matter because\n # agent views are usually square and this grid won't be used for anything.\n grid = MultiGrid((agent.view_size, agent.view_size), orientation=agent.dir+1)\n vis_mask = np.zeros((agent.view_size, agent.view_size), dtype=np.bool)\n return grid, vis_mask\n\n topX, topY, botX, botY = agent.get_view_exts()\n\n grid = self.grid.slice(\n topX, topY, agent.view_size, agent.view_size, rot_k=agent.dir + 1\n )\n\n # Process occluders and visibility\n # Note that this incurs some slight performance cost\n vis_mask = agent.process_vis(grid.opacity)\n\n # Warning about the rest of the function:\n # Allows masking away objects that the agent isn't supposed to see.\n # But breaks consistency between the states of the grid objects in the parial views\n # and the grid objects overall.\n if len(getattr(agent, 'hide_item_types', []))>0:\n for i in range(grid.width):\n for j in range(grid.height):\n item = grid.get(i,j)\n if (item is not None) and (item is not agent) and (item.type in agent.hide_item_types):\n if len(item.agents) > 0:\n grid.set(i,j,item.agents[0])\n else:\n grid.set(i,j,None)\n\n return grid, vis_mask\n\n def gen_agent_obs(self, agent):\n \"\"\"\n Generate the agent's view (partially observable, low-resolution encoding)\n \"\"\"\n grid, vis_mask = self.gen_obs_grid(agent)\n grid_image = grid.render(tile_size=agent.view_tile_size, visible_mask=vis_mask, 
top_agent=agent)\n if agent.observation_style=='image':\n return grid_image\n else:\n ret = {'pov': grid_image}\n if agent.observe_rewards:\n ret['reward'] = getattr(agent, 'step_reward', 0)\n if agent.observe_position:\n agent_pos = agent.pos if agent.pos is not None else (0,0)\n ret['position'] = np.array(agent_pos)/np.array([self.width, self.height], dtype=np.float)\n if agent.observe_orientation:\n agent_dir = agent.dir if agent.dir is not None else 0\n ret['orientation'] = agent_dir\n return ret\n\n def gen_obs(self):\n return [self.gen_agent_obs(agent) for agent in self.agents]\n\n def __str__(self):\n return self.grid.__str__()\n\n def check_agent_position_integrity(self, title=''):\n '''\n This function checks whether each agent is present in the grid in exactly one place.\n This is particularly helpful for validating the world state when ghost_mode=False and\n agents can stack, since the logic for moving them around gets a bit messy.\n Prints a message and drops into pdb if there's an inconsistency.\n '''\n agent_locs = [[] for _ in range(len(self.agents))]\n for i in range(self.grid.width):\n for j in range(self.grid.height):\n x = self.grid.get(i,j)\n for k,agent in enumerate(self.agents):\n if x==agent:\n agent_locs[k].append(('top', (i,j)))\n if hasattr(x, 'agents') and agent in x.agents:\n agent_locs[k].append(('stacked', (i,j)))\n if not all([len(x)==1 for x in agent_locs]):\n print(f\"{title} > Failed integrity test!\")\n for a, al in zip(self.agents, agent_locs):\n print(\" > \", a.color,'-', al)\n import pdb; pdb.set_trace()\n\n def step(self, actions):\n # Spawn agents if it's time.\n for agent in self.agents:\n if not agent.active and not agent.done and self.step_count >= agent.spawn_delay:\n self.place_obj(agent, **self.agent_spawn_kwargs)\n agent.activate()\n \n assert len(actions) == len(self.agents)\n\n step_rewards = np.zeros((len(self.agents,)), dtype=np.float)\n\n self.step_count += 1\n\n iter_agents = list(enumerate(zip(self.agents, actions)))\n iter_order = np.arange(len(iter_agents))\n self.np_random.shuffle(iter_order)\n for shuffled_ix in iter_order:\n agent_no, (agent, action) = iter_agents[shuffled_ix]\n agent.step_reward = 0\n\n if agent.active:\n\n cur_pos = agent.pos[:]\n cur_cell = self.grid.get(*cur_pos)\n fwd_pos = agent.front_pos[:]\n fwd_cell = self.grid.get(*fwd_pos)\n agent_moved = False\n\n # Rotate left\n if action == agent.actions.left:\n agent.dir = (agent.dir - 1) % 4\n\n # Rotate right\n elif action == agent.actions.right:\n agent.dir = (agent.dir + 1) % 4\n\n # Move forward\n elif action == agent.actions.forward:\n # Under the follow conditions, the agent can move forward.\n can_move = fwd_cell is None or fwd_cell.can_overlap()\n if self.ghost_mode is False and isinstance(fwd_cell, GridAgent):\n can_move = False\n\n if can_move:\n agent_moved = True\n # Add agent to new cell\n if fwd_cell is None:\n self.grid.set(*fwd_pos, agent)\n agent.pos = fwd_pos\n else:\n fwd_cell.agents.append(agent)\n agent.pos = fwd_pos\n\n # Remove agent from old cell\n if cur_cell == agent:\n self.grid.set(*cur_pos, None)\n else:\n assert cur_cell.can_overlap()\n cur_cell.agents.remove(agent)\n\n # Add agent's agents to old cell\n for left_behind in agent.agents:\n cur_obj = self.grid.get(*cur_pos)\n if cur_obj is None:\n self.grid.set(*cur_pos, left_behind)\n elif cur_obj.can_overlap():\n cur_obj.agents.append(left_behind)\n else: # How was \"agent\" there in teh first place?\n raise ValueError(\"?!?!?!\")\n\n # After moving, the agent shouldn't contain any 
other agents.\n agent.agents = [] \n # test_integrity(f\"After moving {agent.color} fellow\")\n\n # Rewards can be got iff. fwd_cell has a \"get_reward\" method\n if hasattr(fwd_cell, 'get_reward'):\n rwd = fwd_cell.get_reward(agent)\n if bool(self.reward_decay):\n rwd *= (1.0-0.9*(self.step_count/self.max_steps))\n step_rewards[agent_no] += rwd\n agent.reward(rwd)\n \n\n if isinstance(fwd_cell, (Lava, Goal)):\n agent.done = True\n\n # TODO: verify pickup/drop/toggle logic in an environment that \n # supports the relevant interactions.\n # Pick up an object\n elif action == agent.actions.pickup:\n if fwd_cell and fwd_cell.can_pickup():\n if agent.carrying is None:\n agent.carrying = fwd_cell\n agent.carrying.cur_pos = np.array([-1, -1])\n self.grid.set(*fwd_pos, None)\n else:\n pass\n\n # Drop an object\n elif action == agent.actions.drop:\n if not fwd_cell and agent.carrying:\n self.grid.set(*fwd_pos, agent.carrying)\n agent.carrying.cur_pos = fwd_pos\n agent.carrying = None\n else:\n pass\n\n # Toggle/activate an object\n elif action == agent.actions.toggle:\n if fwd_cell:\n wasted = bool(fwd_cell.toggle(agent, fwd_pos))\n else:\n pass\n\n # Done action (not used by default)\n elif action == agent.actions.done:\n pass\n\n else:\n raise ValueError(f\"Environment can't handle action {action}.\")\n\n agent.on_step(fwd_cell if agent_moved else None)\n\n \n # If any of the agents individually are \"done\" (hit lava or in some cases a goal) \n # but the env requires respawning, then respawn those agents.\n for agent in self.agents:\n if agent.done:\n if self.respawn:\n resting_place_obj = self.grid.get(*agent.pos)\n if resting_place_obj == agent:\n if agent.agents:\n self.grid.set(*agent.pos, agent.agents[0])\n agent.agents[0].agents += agent.agents[1:]\n else:\n self.grid.set(*agent.pos, None)\n else:\n resting_place_obj.agents.remove(agent)\n resting_place_obj.agents += agent.agents[:]\n agent.agents = []\n \n agent.reset(new_episode=False)\n self.place_obj(agent, **self.agent_spawn_kwargs)\n agent.activate()\n else: # if the agent shouldn't be respawned, then deactivate it.\n agent.deactivate()\n\n # The episode overall is done if all the agents are done, or if it exceeds the step limit.\n done = (self.step_count >= self.max_steps) or all([agent.done for agent in self.agents])\n\n obs = [self.gen_agent_obs(agent) for agent in self.agents]\n\n return obs, step_rewards, done, {}\n\n def put_obj(self, obj, i, j):\n \"\"\"\n Put an object at a specific position in the grid. Replace anything that is already there.\n \"\"\"\n self.grid.set(i, j, obj)\n if obj is not None:\n obj.set_position((i,j))\n return True\n\n def try_place_obj(self,obj, pos):\n ''' Try to place an object at a certain position in the grid.\n If it is possible, then do so and return True.\n Otherwise do nothing and return False. 
'''\n # grid_obj: whatever object is already at pos.\n grid_obj = self.grid.get(*pos)\n\n # If the target position is empty, then the object can always be placed.\n if grid_obj is None:\n self.grid.set(*pos, obj)\n obj.set_position(pos)\n return True\n\n # Otherwise only agents can be placed, and only if the target position can_overlap.\n if not (grid_obj.can_overlap() and obj.is_agent):\n return False\n\n # If ghost mode is off and there's already an agent at the target cell, the agent can't\n # be placed there.\n if (not self.ghost_mode) and (grid_obj.is_agent or (len(grid_obj.agents)>0)):\n return False\n\n grid_obj.agents.append(obj)\n obj.set_position(pos)\n return True\n\n def place_obj(self, obj, top=(0,0), size=None, reject_fn=None, max_tries=1e5):\n max_tries = int(max(1, min(max_tries, 1e5)))\n top = (max(top[0], 0), max(top[1], 0))\n if size is None:\n size = (self.grid.width, self.grid.height)\n bottom = (min(top[0] + size[0], self.grid.width), min(top[1] + size[1], self.grid.height))\n\n # agent_positions = [tuple(agent.pos) if agent.pos is not None else None for agent in self.agents]\n for try_no in range(max_tries):\n pos = self.np_random.integers(top, bottom)\n if (reject_fn is not None) and reject_fn(pos):\n continue\n else:\n if self.try_place_obj(obj, pos):\n break\n else:\n raise RecursionError(\"Rejection sampling failed in place_obj.\")\n\n return pos\n\n def place_agents(self, top=None, size=None, rand_dir=True, max_tries=1000):\n # warnings.warn(\"Placing agents with the function place_agents is deprecated.\")\n pass\n\n def render(\n self,\n mode=\"human\",\n close=False,\n highlight=True,\n tile_size=TILE_PIXELS,\n show_agent_views=True,\n max_agents_per_col=3,\n agent_col_width_frac = 0.3,\n agent_col_padding_px = 2,\n pad_grey = 100\n ):\n \"\"\"\n Render the whole-grid human view\n \"\"\"\n\n if close:\n if self.window:\n self.window.close()\n return\n\n if mode == \"human\" and not self.window:\n # from gym.envs.classic_control.rendering import SimpleImageViewer\n\n self.window = SimpleImageViewer(caption=\"Marlgrid\")\n\n # Compute which cells are visible to the agent\n highlight_mask = np.full((self.width, self.height), False, dtype=np.bool)\n for agent in self.agents:\n if agent.active:\n xlow, ylow, xhigh, yhigh = agent.get_view_exts()\n dxlow, dylow = max(0, 0-xlow), max(0, 0-ylow)\n dxhigh, dyhigh = max(0, xhigh-self.grid.width), max(0, yhigh-self.grid.height)\n if agent.see_through_walls:\n highlight_mask[xlow+dxlow:xhigh-dxhigh, ylow+dylow:yhigh-dyhigh] = True\n else:\n a,b = self.gen_obs_grid(agent)\n highlight_mask[xlow+dxlow:xhigh-dxhigh, ylow+dylow:yhigh-dyhigh] |= (\n rotate_grid(b, a.orientation)[dxlow:(xhigh-xlow)-dxhigh, dylow:(yhigh-ylow)-dyhigh]\n )\n\n\n # Render the whole grid\n img = self.grid.render(\n tile_size, highlight_mask=highlight_mask if highlight else None\n )\n rescale = lambda X, rescale_factor=2: np.kron(\n X, np.ones((int(rescale_factor), int(rescale_factor), 1))\n )\n\n if show_agent_views:\n\n target_partial_width = int(img.shape[0]*agent_col_width_frac-2*agent_col_padding_px)\n target_partial_height = (img.shape[1]-2*agent_col_padding_px)//max_agents_per_col\n\n agent_views = [self.gen_agent_obs(agent) for agent in self.agents]\n agent_views = [view['pov'] if isinstance(view, dict) else view for view in agent_views]\n agent_views = [rescale(view, min(target_partial_width/view.shape[0], target_partial_height/view.shape[1])) for view in agent_views]\n # import pdb; pdb.set_trace()\n agent_views = 
[agent_views[pos:pos+max_agents_per_col] for pos in range(0, len(agent_views), max_agents_per_col)]\n\n f_offset = lambda view: np.array([target_partial_height - view.shape[1], target_partial_width - view.shape[0]])//2\n \n cols = []\n for col_views in agent_views:\n col = np.full(( img.shape[0],target_partial_width+2*agent_col_padding_px,3), pad_grey, dtype=np.uint8)\n for k, view in enumerate(col_views):\n offset = f_offset(view) + agent_col_padding_px\n offset[0] += k*target_partial_height\n col[offset[0]:offset[0]+view.shape[0], offset[1]:offset[1]+view.shape[1],:] = view\n cols.append(col)\n\n img = np.concatenate((img, *cols), axis=1)\n\n if mode == \"human\":\n if not self.window.isopen:\n self.window.imshow(img)\n self.window.window.set_caption(\"Marlgrid\")\n else:\n self.window.imshow(img)\n\n return img"
},
{
"identifier": "MultiGrid",
"path": "marlgrid/base.py",
"snippet": "class MultiGrid:\n\n tile_cache = {}\n\n def __init__(self, shape, obj_reg=None, orientation=0):\n self.orientation = orientation\n if isinstance(shape, tuple):\n self.width, self.height = shape\n self.grid = np.zeros((self.width, self.height), dtype=np.uint8) # w,h\n elif isinstance(shape, np.ndarray):\n self.width, self.height = shape.shape\n self.grid = shape\n else:\n raise ValueError(\"Must create grid from shape tuple or array.\")\n\n if self.width < 3 or self.height < 3:\n raise ValueError(\"Grid needs width, height >= 3\")\n\n self.obj_reg = ObjectRegistry(objs=[None]) if obj_reg is None else obj_reg\n\n @property\n def opacity(self):\n transparent_fun = np.vectorize(lambda k: (self.obj_reg.key_to_obj_map[k].see_behind() if hasattr(self.obj_reg.key_to_obj_map[k], 'see_behind') else True))\n return ~transparent_fun(self.grid)\n\n def __getitem__(self, *args, **kwargs):\n return self.__class__(\n np.ndarray.__getitem__(self.grid, *args, **kwargs),\n obj_reg=self.obj_reg,\n orientation=self.orientation,\n )\n\n def rotate_left(self, k=1):\n return self.__class__(\n rotate_grid(self.grid, rot_k=k), # np.rot90(self.grid, k=k),\n obj_reg=self.obj_reg,\n orientation=(self.orientation - k) % 4,\n )\n\n\n def slice(self, topX, topY, width, height, rot_k=0):\n \"\"\"\n Get a subset of the grid\n \"\"\"\n sub_grid = self.__class__(\n (width, height),\n obj_reg=self.obj_reg,\n orientation=(self.orientation - rot_k) % 4,\n )\n x_min = max(0, topX)\n x_max = min(topX + width, self.width)\n y_min = max(0, topY)\n y_max = min(topY + height, self.height)\n\n x_offset = x_min - topX\n y_offset = y_min - topY\n sub_grid.grid[\n x_offset : x_max - x_min + x_offset, y_offset : y_max - y_min + y_offset\n ] = self.grid[x_min:x_max, y_min:y_max]\n\n sub_grid.grid = rotate_grid(sub_grid.grid, rot_k)\n\n sub_grid.width, sub_grid.height = sub_grid.grid.shape\n\n return sub_grid\n\n def set(self, i, j, obj):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n self.grid[i, j] = self.obj_reg.get_key(obj)\n\n def get(self, i, j):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n\n return self.obj_reg.key_to_obj_map[self.grid[i, j]]\n\n def horz_wall(self, x, y, length=None, obj_type=Wall):\n if length is None:\n length = self.width - x\n for i in range(0, length):\n self.set(x + i, y, obj_type())\n\n def vert_wall(self, x, y, length=None, obj_type=Wall):\n if length is None:\n length = self.height - y\n for j in range(0, length):\n self.set(x, y + j, obj_type())\n\n def wall_rect(self, x, y, w, h, obj_type=Wall):\n self.horz_wall(x, y, w, obj_type=obj_type)\n self.horz_wall(x, y + h - 1, w, obj_type=obj_type)\n self.vert_wall(x, y, h, obj_type=obj_type)\n self.vert_wall(x + w - 1, y, h, obj_type=obj_type)\n\n def __str__(self):\n render = (\n lambda x: \" \"\n if x is None or not hasattr(x, \"str_render\")\n else x.str_render(dir=self.orientation)\n )\n hstars = \"*\" * (2 * self.width + 2)\n return (\n hstars\n + \"\\n\"\n + \"\\n\".join(\n \"*\" + \"\".join(render(self.get(i, j)) for i in range(self.width)) + \"*\"\n for j in range(self.height)\n )\n + \"\\n\"\n + hstars\n )\n\n def encode(self, vis_mask=None):\n \"\"\"\n Produce a compact numpy encoding of the grid\n \"\"\"\n\n if vis_mask is None:\n vis_mask = np.ones((self.width, self.height), dtype=bool)\n\n array = np.zeros((self.width, self.height, 3), dtype=\"uint8\")\n\n for i in range(self.width):\n for j in range(self.height):\n if vis_mask[i, j]:\n v = self.get(i, j)\n if v is None:\n array[i, 
j, :] = 0\n else:\n array[i, j, :] = v.encode()\n return array\n\n @classmethod\n def decode(cls, array):\n raise NotImplementedError\n width, height, channels = array.shape\n assert channels == 3\n vis_mask[i, j] = np.ones(shape=(width, height), dtype=np.bool)\n grid = cls((width, height))\n\n \n @classmethod\n def cache_render_fun(cls, key, f, *args, **kwargs):\n if key not in cls.tile_cache:\n cls.tile_cache[key] = f(*args, **kwargs)\n return np.copy(cls.tile_cache[key])\n\n @classmethod\n def cache_render_obj(cls, obj, tile_size, subdivs):\n if obj is None:\n return cls.cache_render_fun((tile_size, None), cls.empty_tile, tile_size, subdivs)\n else:\n img = cls.cache_render_fun(\n (tile_size, obj.__class__.__name__, *obj.encode()),\n cls.render_object, obj, tile_size, subdivs\n )\n if hasattr(obj, 'render_post'):\n return obj.render_post(img)\n else:\n return img\n\n @classmethod\n def empty_tile(cls, tile_size, subdivs):\n alpha = max(0, min(20, tile_size-10))\n img = np.full((tile_size, tile_size, 3), alpha, dtype=np.uint8)\n img[1:,:-1] = 0\n return img\n\n @classmethod\n def render_object(cls, obj, tile_size, subdivs):\n img = np.zeros((tile_size*subdivs,tile_size*subdivs, 3), dtype=np.uint8)\n obj.render(img)\n # if 'Agent' not in obj.type and len(obj.agents) > 0:\n # obj.agents[0].render(img)\n return downsample(img, subdivs).astype(np.uint8)\n\n @classmethod\n def blend_tiles(cls, img1, img2):\n '''\n This function renders one \"tile\" on top of another. Kinda janky, works surprisingly well.\n Assumes img2 is a downscaled monochromatic with a black (0,0,0) background.\n '''\n alpha = img2.sum(2, keepdims=True)\n max_alpha = alpha.max()\n if max_alpha == 0:\n return img1\n return (\n ((img1 * (max_alpha-alpha))+(img2*alpha)\n )/max_alpha\n ).astype(img1.dtype)\n\n @classmethod\n def render_tile(cls, obj, tile_size=TILE_PIXELS, subdivs=3, top_agent=None):\n subdivs = 3\n\n if obj is None:\n img = cls.cache_render_obj(obj, tile_size, subdivs)\n else:\n if ('Agent' in obj.type) and (top_agent in obj.agents):\n # If the tile is a stack of agents that includes the top agent, then just render the top agent.\n img = cls.cache_render_obj(top_agent, tile_size, subdivs)\n else: \n # Otherwise, render (+ downsize) the item in the tile.\n img = cls.cache_render_obj(obj, tile_size, subdivs)\n # If the base obj isn't an agent but has agents on top, render an agent and blend it in.\n if len(obj.agents)>0 and 'Agent' not in obj.type:\n if top_agent in obj.agents:\n img_agent = cls.cache_render_obj(top_agent, tile_size, subdivs)\n else:\n img_agent = cls.cache_render_obj(obj.agents[0], tile_size, subdivs)\n img = cls.blend_tiles(img, img_agent)\n\n # Render the tile border if any of the corners are black.\n if (img[([0,0,-1,-1],[0,-1,0,-1])]==0).all(axis=-1).any():\n img = img + cls.cache_render_fun((tile_size, None), cls.empty_tile, tile_size, subdivs)\n return img\n\n def render(self, tile_size, highlight_mask=None, visible_mask=None, top_agent=None):\n width_px = self.width * tile_size\n height_px = self.height * tile_size\n\n img = np.zeros(shape=(height_px, width_px), dtype=np.uint8)[...,None]+COLORS['shadow']\n\n for j in range(0, self.height):\n for i in range(0, self.width):\n if visible_mask is not None and not visible_mask[i,j]:\n continue\n obj = self.get(i, j)\n\n tile_img = MultiGrid.render_tile(\n obj,\n tile_size=tile_size,\n top_agent=top_agent\n )\n\n ymin = j * tile_size\n ymax = (j + 1) * tile_size\n xmin = i * tile_size\n xmax = (i + 1) * tile_size\n\n img[ymin:ymax, xmin:xmax, 
:] = rotate_grid(tile_img, self.orientation)\n \n if highlight_mask is not None:\n hm = np.kron(highlight_mask.T, np.full((tile_size, tile_size), 255, dtype=np.uint16)\n )[...,None] # arcane magic.\n img = np.right_shift(img.astype(np.uint16)*8+hm*2, 3).clip(0,255).astype(np.uint8)\n\n return img"
}
] | from ..base import MultiGridEnv, MultiGrid
from ..objects import * | 7,280 |
class ClutteredGoalCycleEnv(MultiGridEnv):
mission = "Cycle between yellow goal tiles."
metadata = {}
def __init__(self, *args, reward=1, penalty=0.0, n_clutter=None, clutter_density=None, n_bonus_tiles=3,
initial_reward=True, cycle_reset=False, reset_on_mistake=False, reward_decay=False, **kwargs):
if (n_clutter is None) == (clutter_density is None):
raise ValueError("Must provide n_clutter xor clutter_density in environment config.")
# Overwrite the default reward_decay for goal cycle environments.
super().__init__(*args, **{**kwargs, 'reward_decay': reward_decay})
if clutter_density is not None:
self.n_clutter = int(clutter_density * (self.width - 2) * (self.height - 2))
else:
self.n_clutter = n_clutter
self.reward = reward
self.penalty = penalty
self.initial_reward = initial_reward
self.n_bonus_tiles = n_bonus_tiles
self.reset_on_mistake = reset_on_mistake
self.bonus_tiles = []
self.bonus_tiles_pos = [[] for _ in range(self.n_bonus_tiles)]
self.wall_pos = [[] for _ in range(self.n_clutter)]
def _gen_grid(self, width, height):
|
class ClutteredGoalCycleEnv(MultiGridEnv):
mission = "Cycle between yellow goal tiles."
metadata = {}
def __init__(self, *args, reward=1, penalty=0.0, n_clutter=None, clutter_density=None, n_bonus_tiles=3,
initial_reward=True, cycle_reset=False, reset_on_mistake=False, reward_decay=False, **kwargs):
if (n_clutter is None) == (clutter_density is None):
raise ValueError("Must provide n_clutter xor clutter_density in environment config.")
# Overwrite the default reward_decay for goal cycle environments.
super().__init__(*args, **{**kwargs, 'reward_decay': reward_decay})
if clutter_density is not None:
self.n_clutter = int(clutter_density * (self.width - 2) * (self.height - 2))
else:
self.n_clutter = n_clutter
self.reward = reward
self.penalty = penalty
self.initial_reward = initial_reward
self.n_bonus_tiles = n_bonus_tiles
self.reset_on_mistake = reset_on_mistake
self.bonus_tiles = []
self.bonus_tiles_pos = [[] for _ in range(self.n_bonus_tiles)]
self.wall_pos = [[] for _ in range(self.n_clutter)]
def _gen_grid(self, width, height): | self.grid = MultiGrid((width, height)) | 1 | 2023-12-24 06:50:38+00:00 | 12k |
smonsays/modular-hyperteacher | metax/data/imitation.py | [
{
"identifier": "Environment",
"path": "metax/data/envs/base.py",
"snippet": "class Environment(abc.ABC):\n @abc.abstractproperty\n def num_actions(self) -> int:\n \"\"\" Number of possible actions.\"\"\"\n\n @abc.abstractproperty\n def observation_shape(self):\n \"\"\"The shape of the observation array\"\"\"\n\n @abc.abstractmethod\n def observe(self, env_state: EnvironmentState):\n \"\"\"Returns the observation from the environment state.\"\"\"\n\n @abc.abstractmethod\n def reset(self, rng: PRNGKey, goal: Array = None) -> Tuple[Any, EnvironmentInteraction]:\n \"\"\"Resets the environment to an initial state.\"\"\"\n\n @abc.abstractmethod\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n \"\"\"Resets the environment goal.\"\"\"\n\n def step(\n self, rng: PRNGKey, env_state: EnvironmentState, action: Array\n ) -> Tuple[EnvironmentState, EnvironmentInteraction]:\n \"\"\"Run one timestep of the environment's dynamics. Returns the Transition and the Environment state.\"\"\"\n\n # return self._step(rng, env_state, action)\n def empty_step(rng, state, action):\n \"\"\"\n Only update time and give no reward.\n \"\"\"\n new_timestep = state.timestep + 1\n new_state = state.replace(timestep=new_timestep)\n new_emission = EnvironmentInteraction(\n observation=self.observe(state),\n reward=0.0,\n done=state.done,\n timestep=new_timestep,\n )\n return new_state, new_emission\n\n # Only run env step if not already done\n return jax.lax.cond(\n env_state.done,\n empty_step,\n self._step,\n rng,\n env_state,\n action,\n )\n\n @abc.abstractmethod\n def _step(\n self, rng: PRNGKey, env_state: EnvironmentState, action: Array\n ) -> Tuple[EnvironmentState, EnvironmentInteraction]:\n \"\"\"Run one timestep of the environment's dynamics. Returns the Transition and the Environment state.\"\"\""
},
{
"identifier": "CompositionalGrid",
"path": "metax/data/envs/grid.py",
"snippet": "class CompositionalGrid(Environment):\n def __init__(\n self,\n grid_size: int,\n num_interactions: int,\n num_mazes: int,\n num_objects: int,\n num_distractors: int,\n frac_ood: float,\n task_support: str,\n seed: int,\n ) -> None:\n super().__init__()\n assert grid_size > 5, \"grid_size must be greater than 5\"\n\n self.grid_size = grid_size\n self.num_interactions = num_interactions\n self.num_directions = 4 # split grid into 4 quadrants for the goal position\n self.num_objects = num_objects\n self.num_mazes = num_mazes\n self.num_distractors = num_distractors\n self.frac_ood = frac_ood\n self.task_support = task_support\n self.seed = seed\n self.rng = jax.random.PRNGKey(seed)\n self.num_factors = 4 # direction, interaction, maze, object\n\n # Static matrices\n self._delta_position = jnp.concatenate((\n jnp.array([[-1, 0], [0, 1], [1, 0], [0, -1]]), # up, right, down, left\n jnp.zeros((self.num_interactions, 2), dtype=jnp.int32), # no movement for interaction\n ))\n size_low, size_high = grid_size // 2, (grid_size // 2) + grid_size % 2\n self._quadrants = jnp.stack((\n np.block([\n [np.ones((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.ones((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.ones((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.ones((size_low, size_low))]\n ]),\n ))\n\n # Pregenerate possible goals and randomly split into in/out of distribution\n self.tasks_all = np.array(list(itertools.product(\n range(self.num_directions),\n range(self.num_interactions),\n range(self.num_mazes),\n range(self.num_objects),\n )))\n\n if self.task_support == \"non_compositional\":\n # in/out split with non-compositional support\n self.tasks_in_dist = np.array(list(itertools.product(\n range(self.num_directions - 1), # hold out one goal quadrant from in_dist\n range(self.num_interactions),\n range(self.num_mazes),\n range(self.num_objects),\n )))\n\n @partial(np.vectorize, signature=\"(k),(n,k)->()\")\n def elem_in_array(elem, array):\n return np.any(np.all(elem == array, axis=1))\n\n self.tasks_out_dist = self.tasks_all[~elem_in_array(self.tasks_all, self.tasks_in_dist)]\n\n elif \"_hot\" in self.task_support:\n num_hot = int(self.task_support.split(\"_\")[0])\n mask = jnp.sum(self.tasks_all > 0, axis=1) <= num_hot\n self.tasks_in_dist = jnp.array(self.tasks_all[mask])\n self.tasks_out_dist = jnp.array(self.tasks_all[~mask])\n\n elif self.task_support == \"random\":\n self.tasks_all = jax.random.permutation(self.rng, self.tasks_all)\n self.num_ood = int(len(self.tasks_all) * self.frac_ood)\n self.tasks_in_dist = jnp.array(self.tasks_all[: -self.num_ood])\n self.tasks_out_dist = jnp.array(self.tasks_all[-self.num_ood:])\n\n # Make sure all features for every factor are present in the in-distribution tasks\n assert len(jnp.unique(self.tasks_in_dist[:, 0])) == self.num_directions\n assert len(jnp.unique(self.tasks_in_dist[:, 1])) == self.num_interactions\n assert len(jnp.unique(self.tasks_in_dist[:, 2])) == self.num_mazes\n assert len(jnp.unique(self.tasks_in_dist[:, 3])) == self.num_objects\n else:\n raise ValueError(f\"Invalid task support: {self.task_support}\")\n\n 
assert len(self.tasks_in_dist) > 0\n assert len(self.tasks_out_dist) > 0\n\n # Create random mazes\n if self.num_mazes > 0:\n self.mazes = jnp.stack([\n self.generate_random_maze(self.grid_size, seed=self.seed + i)\n for i in range(self.num_mazes)\n ])\n else:\n self.mazes = jnp.zeros((1, self.grid_size, self.grid_size))\n\n # Precompute optimal paths, this is potentially expensive for large grid sizes\n optimal_paths, shortest_paths = list(\n zip(*[self._precompute_optimal_paths(m) for m in self.mazes])\n )\n self.optimal_paths, shortest_paths = jnp.stack(optimal_paths), jnp.stack(shortest_paths)\n self.valid_goal_dist = shortest_paths >= self.grid_size\n\n @property\n def num_actions(self) -> int:\n return 4 + self.num_interactions\n\n @property\n def observation_shape(self) -> Tuple[int]:\n # encodes positions of agent, objects and walls\n return (self.grid_size, self.grid_size, self.num_objects + 2)\n\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n assert mode in [\"ood\", \"test\", \"train\"]\n if mode == \"ood\":\n task_code = jax.random.choice(rng, self.tasks_out_dist)\n else:\n task_code = jax.random.choice(rng, self.tasks_in_dist)\n\n task_id = jnp.ravel_multi_index(\n task_code,\n dims=(self.num_directions, self.num_interactions, self.num_mazes, self.num_objects),\n mode=\"wrap\",\n )\n emb_dim = max(self.num_directions, self.num_interactions, self.num_mazes, self.num_objects)\n embedding = jax.nn.one_hot(task_code, emb_dim)\n\n return CompositionalGridGoal(*task_code), {\"task_id\": task_id, \"embedding\": embedding}\n\n def reset(\n self, rng: PRNGKey, goal: Optional[CompositionalGridGoal] = None\n ) -> Tuple[CompositionalGridState, EnvironmentInteraction]:\n \"\"\"Resets the environment to a random, initial state\"\"\"\n rng_distractor, rng_pos1, rng_pos2, rng_pos3, rng_goal = jax.random.split(rng, 5)\n\n if goal is None:\n # Sample a goal from train distribution if None specified\n goal, _ = self.reset_goal(rng_goal, mode=\"train\")\n\n # Sample distractor objects distinct from goal object\n distractors = jax.random.choice(\n key=rng_distractor,\n a=self.num_objects,\n shape=(self.num_distractors,),\n replace=True,\n p=1.0 - (jnp.arange(self.num_objects) == goal.object)\n )\n\n # Sample distinct, random positions for agent, distractors and the goal respecting direction\n position_goal = jax.random.choice(\n key=rng_pos2,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(1, ),\n p=((1.0 - self.mazes[goal.maze]) * self._quadrants[goal.direction]).reshape(-1),\n )\n goal_coord = self._coord_to_idx(position_goal[0][0], position_goal[0][1])\n position_agent = jax.random.choice(\n key=rng_pos1,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(1, ),\n p=((1.0 - self.mazes[goal.maze]).reshape(-1) * self.valid_goal_dist[goal.maze][goal_coord]),\n )\n positions_distractors = jax.random.choice(\n key=rng_pos3,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(self.num_distractors, ),\n replace=False,\n p=1.0 - self.mazes[goal.maze].reshape(-1),\n )\n\n positions = jnp.concatenate([position_goal, positions_distractors, position_agent])\n\n env_state = CompositionalGridState(\n done=False, timestep=0, distractors=distractors, positions=positions, goal=goal\n )\n emission = EnvironmentInteraction(\n observation=self.observe(env_state), reward=0.0, done=False, timestep=0\n )\n\n return env_state, emission\n\n def _step(\n self, rng: PRNGKey, env_state, action: Array\n ) -> 
Tuple[CompositionalGridState, EnvironmentInteraction]:\n pos_agent = env_state.positions[-1, :]\n\n # Check if agent reached goal (positive reward)\n goal_reached = jnp.logical_and(\n action == (len(MOVES) + env_state.goal.interaction),\n jnp.all(pos_agent == env_state.positions[0, :]),\n )\n reward = 1.0 * goal_reached\n\n # Move the agent to new position and check if valid\n pos_new = self._delta_position[action] + pos_agent\n pos_invalid = jnp.logical_or(\n jnp.logical_or(jnp.any(pos_new < 0), jnp.any(pos_new >= self.grid_size)), # in grid?\n self.mazes[env_state.goal.maze][pos_new[0], pos_new[1]], # in wall?\n )\n pos_new = jnp.where(pos_invalid, pos_agent, pos_new)\n\n # Update state\n positions = env_state.positions.at[-1].set(pos_new)\n env_state = CompositionalGridState(\n done=goal_reached,\n timestep=env_state.timestep + 1,\n distractors=env_state.distractors,\n positions=positions,\n goal=env_state.goal,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=reward,\n done=env_state.done,\n timestep=env_state.timestep,\n )\n\n return env_state, emission\n\n def observe(self, env_state: CompositionalGridState) -> Array:\n \"\"\"\n Encode the environment state as an asrray of shape (grid_size, grid_size, num_factors * num_objects + 1).\n For each position in the grid, the code word has the following structure:\n [factor_0_feature_0, ..., factor_0_feature_n, ..., factor_n_feature_0, ..., factor_n_feature_n, wall?, agent?]\n \"\"\"\n objects = jnp.concatenate([jnp.array([env_state.goal.object]), env_state.distractors])\n objects_hot = jax.nn.one_hot(objects, num_classes=self.num_objects)\n pos_objects, pos_agent = env_state.positions[0:-1, :], env_state.positions[-1, :]\n\n # Build the grid\n grid = jnp.zeros(self.observation_shape)\n grid = grid.at[\n jnp.expand_dims(pos_objects[:, 0], axis=1),\n jnp.expand_dims(pos_objects[:, 1], axis=1),\n :-2,\n ].set(jnp.expand_dims(objects_hot, axis=1))\n grid = grid.at[:, :, -2].set(self.mazes[env_state.goal.maze]) # walls encoded in penultimate channel\n grid = grid.at[pos_agent[0], pos_agent[1], -1].set(1.0) # agent encoded in last channel\n\n return grid\n\n def _features_to_idx(self, features: Array) -> Array:\n \"\"\"Converts features to a unique feature index\"\"\"\n idx = [factor * self.num_objects + feature for factor, feature in enumerate(features)]\n return jnp.array(idx)\n\n def _coord_to_idx(self, x, y):\n \"\"\"Converts coordinates to a unique grid index\"\"\"\n return x * self.grid_size + y\n\n def _idx_to_coord(self, idx):\n \"\"\"Converts a grid index to grid coordinates\"\"\"\n return idx // self.grid_size, idx % self.grid_size\n\n def demonstrate(\n self, rng: PRNGKey, env_state: CompositionalGridState\n ) -> EnvironmentInteraction:\n \"\"\"Given a state, compute the optimal trajectory to the goal.\"\"\"\n pos_agent, pos_goal = env_state.positions[-1, :], env_state.positions[0, :]\n idx_agent, idx_goal = self._coord_to_idx(*pos_agent), self._coord_to_idx(*pos_goal)\n optimal_actions = self.optimal_paths[env_state.goal.maze][idx_agent, idx_goal]\n\n # Fill placeholder actions with correct interaction\n mask_pad = (optimal_actions == -1)\n optimal_actions *= ~mask_pad\n optimal_actions += (len(MOVES) + env_state.goal.interaction) * mask_pad\n\n def env_step(carry, action):\n rng, env_state = carry\n rng, rng_step = jax.random.split(rng)\n env_state, emission = self.step(rng_step, env_state, action)\n return (rng, env_state), emission\n\n _, trajectory = jax.lax.scan(env_step, (rng, 
env_state), optimal_actions)\n\n # Append initial emission and remove last emission from trajectory\n initial_emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=0.0,\n done=False,\n timestep=0,\n )\n trajectory = jtu.tree_map(\n lambda x, y: jnp.concatenate((jnp.expand_dims(x, axis=0), y)),\n initial_emission, trajectory\n )\n trajectory = jtu.tree_map(lambda x: x[:-1], trajectory)\n\n return trajectory, optimal_actions\n\n def _precompute_optimal_paths(self, maze: Array):\n \"\"\"Precompute the optimal trajectories for all possible states.\"\"\"\n # Create an array that encodes the graph structure of the grid to compute all shortest paths\n coordinates, no_walls_coords = [], np.argwhere(maze == 0)\n for x, y in no_walls_coords:\n edges = []\n if x > 0 and not maze[x - 1, y]:\n edges.append([x - 1, y])\n if x < self.grid_size - 1 and not maze[x + 1, y]:\n edges.append([x + 1, y])\n if y > 0 and not maze[x, y - 1]:\n edges.append([x, y - 1])\n if y < self.grid_size - 1 and not maze[x, y + 1]:\n edges.append([x, y + 1])\n\n idx_curr = self._coord_to_idx(x, y)\n coordinates += [(idx_curr, self._coord_to_idx(i, k)) for (i, k) in edges]\n\n coordinates = np.array(coordinates)\n connectivity = np.zeros((self.grid_size**2, self.grid_size**2))\n connectivity[coordinates[:, 0], coordinates[:, 1]] = 1.0\n shortest_paths, predecessors = shortest_path(connectivity, return_predecessors=True)\n max_num_actions = (self.grid_size**2) - 1\n\n def get_path(predecessors, start, end):\n \"\"\"Get the full path from the predecessor matrix.\"\"\"\n path = [end]\n while path[-1] != start:\n path.append(predecessors[start, path[-1]])\n return path[::-1]\n\n def path_to_actions(path):\n \"\"\"Convert path to actions.\"\"\"\n # Pad with placeholder actions, need to be overwritten with correct interaction in self.demonstrate()\n actions = np.full((max_num_actions), -1)\n for i in range(len(path) - 1):\n x1, y1 = self._idx_to_coord(path[i])\n x2, y2 = self._idx_to_coord(path[i + 1])\n action = np.array([x2 - x1, y2 - y1])\n action = np.where(np.all(self._delta_position == action, axis=1))[0][0]\n actions[i] = action\n return np.array(actions)\n\n # Precompute optimal paths for all possible positions\n optimal_paths = -1 * np.ones(\n (self.grid_size**2, self.grid_size**2, max_num_actions), dtype=int\n )\n for start in no_walls_coords:\n for goal in no_walls_coords:\n start_idx, goal_idx = self._coord_to_idx(*start), self._coord_to_idx(*goal)\n path = get_path(predecessors, start_idx, goal_idx)\n actions = path_to_actions(path)\n optimal_paths[start_idx, goal_idx, :] = actions\n\n return jnp.array(optimal_paths), jnp.array(shortest_paths)\n\n @staticmethod\n def generate_random_maze(\n grid_size: int, complexity: float = 0.75, density: float = 0.75, seed: int = 0\n ):\n \"\"\"\n Generate a random maze array.\n Walls are encoded as 1 and free space as 0.\n\n Adapted from https://github.com/zuoxingdong/mazelab/blob/master/mazelab/generators/random_maze.py\n which is based on https://en.wikipedia.org/wiki/Maze_generation_algorithm\n \"\"\"\n assert grid_size % 2 == 1, \"Maze size must be odd\"\n grid_size_pad = grid_size + 2\n np_rng = np.random.default_rng(seed)\n\n # Adjust complexity and density relative to maze size\n complexity = int(complexity * (5 * (grid_size_pad + grid_size_pad)))\n density = int(density * ((grid_size_pad // 2) * (grid_size_pad // 2)))\n\n # Fill borders\n grid = np.zeros((grid_size_pad, grid_size_pad), dtype=bool)\n grid[0, :] = grid[-1, :] = 1\n grid[:, 0] = 
grid[:, -1] = 1\n\n # Make aisles\n for _ in range(density):\n x, y = (\n np_rng.integers(0, grid_size_pad // 2 + 1) * 2,\n np_rng.integers(0, grid_size_pad // 2 + 1) * 2,\n )\n grid[y, x] = 1\n for j in range(complexity):\n neighbours = []\n if x > 1:\n neighbours.append((y, x - 2))\n if x < grid_size_pad - 2:\n neighbours.append((y, x + 2))\n if y > 1:\n neighbours.append((y - 2, x))\n if y < grid_size_pad - 2:\n neighbours.append((y + 2, x))\n if len(neighbours):\n y_, x_ = neighbours[np_rng.integers(0, len(neighbours))]\n if grid[y_, x_] == 0:\n grid[y_, x_] = 1\n grid[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1\n x, y = x_, y_\n\n return grid.astype(int)[1:-1, 1:-1]"
},
{
"identifier": "CompositionalPreference",
"path": "metax/data/envs/preference.py",
"snippet": "class CompositionalPreference(Environment):\n # _layout = \"\"\"\\\n # wwwwwwwwwwwww\n # w w w\n # w w w\n # w w\n # w w w\n # w w w\n # ww wwww w\n # w www www\n # w w w\n # w w w\n # w w\n # w w w\n # wwwwwwwwwwwww\n # \"\"\"\n _layout = \"\"\"\\\nwwwwwww\nw w w\nw w w\nww ww\nw w w\nw w w\nwwwwwww\n\"\"\"\n _delta_position = jnp.array(\n [\n [0, 0], # NOTHING\n [-1, 0], # UP\n [0, 1], # RIGHT\n [1, 0], # DOWN\n [0, -1], # LEFT\n ]\n )\n\n def __init__(\n self,\n num_preferences: int, # ~=num_experts\n num_features: int, # ~=dim layer weight\n num_objects: int,\n num_hot: int, # ~= num_hot\n continuous_combinations: bool,\n discount: float,\n frac_ood: float,\n timelimit: int,\n task_support: str,\n seed: int,\n ) -> None:\n super().__init__()\n self.num_preferences = num_preferences\n self.num_features = num_features\n self.num_objects = num_objects\n self.num_hot = num_hot\n self.continuous_combinations = continuous_combinations\n self.discount = discount\n self.frac_ood = frac_ood\n self.timelimit = timelimit\n self.task_support = task_support\n self.seed = seed\n self.rng = jax.random.PRNGKey(seed)\n\n # We assume a fixed grid.\n self.grid = jnp.array(\n [list(map(lambda c: 0 if c == \" \" else 1, line)) for line in self._layout.splitlines()]\n )\n self.free_coord = jnp.array([(x, y) for (x, y) in zip(*np.where(self.grid == 0))])\n grid_idx_to_coord_matrix = jax.nn.one_hot(\n self.free_coord[:, 0] * self.grid.shape[1] + self.free_coord[:, 1],\n self.grid.shape[0] * self.grid.shape[1],\n )\n self.coord_matrix_to_grid_idx = jnp.argmax(grid_idx_to_coord_matrix.T, axis=-1)\n self.grid_idx_to_coord_matrix = jnp.argmax(grid_idx_to_coord_matrix, axis=-1)\n self.num_free_coord = self.free_coord.shape[0]\n self.num_available_distractors_config = 2**self.num_objects\n self.num_states = self.num_free_coord * self.num_available_distractors_config\n\n self.preference_basis = jax.random.normal(\n self.rng, (self.num_preferences, self.num_features)\n )\n\n # Generate all possible combinations of 1:num_hot experts (num_experts choose num_hot)\n preference_combin_all = []\n for h in range(1, self.num_hot + 1):\n perms = itertools.combinations(range(self.num_preferences), h)\n preference_idx = np.array(list(perms)).reshape(-1, h)\n preference_combin_all_k_hot = self.k_hot(preference_idx)\n preference_combin_all.append(preference_combin_all_k_hot)\n\n preference_combin_all = jnp.concatenate(preference_combin_all)\n\n if self.task_support == \"connected\" or self.task_support == \"disconnected\":\n assert self.num_hot == 2\n assert self.num_preferences > 4 and self.num_preferences % 2 == 0\n # connected: 0 1 2 3 4 5 6 7 01 12 23 34 45 56 67 70 02 13 24 35 46 57 60 71\n preference_combin = [self.k_hot(np.arange(self.num_preferences)[:, None])] # one-hots\n preference_combin.append(self.k_hot(np.stack(( # two-hots 01 12 23 34 45 56 67 70\n np.arange(self.num_preferences),\n (np.arange(self.num_preferences) + 1) % self.num_preferences)).T\n ))\n preference_combin.append(self.k_hot(np.stack(( # two-hots 02 13 24 35 46 57 60 71\n np.arange(self.num_preferences),\n (np.arange(self.num_preferences) + 2) % self.num_preferences)).T\n ))\n preference_combin_connected = np.concatenate(preference_combin)\n\n @partial(np.vectorize, signature=\"(n),(m,n)->()\")\n def elem_in_array(elem, array):\n return np.any(np.all(elem == array, axis=1))\n\n mask_connected = elem_in_array(preference_combin_all, preference_combin_connected)\n\n # disconnected: 1 and 2 hots out of (0,1,2,3) U 1 and 2 hots out of 
(4,5,6,7)\n mask_1_hot = jnp.sum(preference_combin_all, axis=-1) == 1\n mask_2_hot = jnp.sum(preference_combin_all, axis=-1) == 2\n mask_preference_combin_1 = jnp.all(preference_combin_all[:, :self.num_preferences // 2] == 0, axis=1)\n mask_preference_combin_2 = jnp.all(preference_combin_all[:, self.num_preferences // 2:] == 0, axis=1)\n\n mask_disconnected = (\n (mask_1_hot & mask_preference_combin_1) | (mask_1_hot & mask_preference_combin_2) | (\n mask_2_hot & mask_preference_combin_1) | (mask_2_hot & mask_preference_combin_2)\n )\n\n if self.task_support == \"connected\":\n mask_in_dist = mask_connected\n elif self.task_support == \"disconnected\":\n mask_in_dist = mask_disconnected\n\n mask_out_dist = ~(mask_connected | mask_disconnected)\n\n self.preference_in_dist = jnp.array(preference_combin_all[mask_in_dist])\n self.preference_out_dist = jnp.array(preference_combin_all[mask_out_dist])\n\n elif self.task_support == \"non_compositional\":\n # Non-compositional task support holds-out the last expert in the last layer\n mask_last_expert = preference_combin_all[:, -1] == 1\n self.preference_in_dist = jnp.array(preference_combin_all[~mask_last_expert])\n self.preference_out_dist = jnp.array(preference_combin_all[mask_last_expert])\n\n elif self.task_support == \"random\":\n # Randomly split task experts into in and out distribution tasks\n preference_combin_all = jax.random.permutation(self.rng, preference_combin_all)\n self.num_ood = int(len(preference_combin_all) * self.frac_ood)\n self.preference_in_dist = jnp.array(preference_combin_all[: -self.num_ood])\n self.preference_out_dist = jnp.array(preference_combin_all[-self.num_ood:])\n\n assert len(self.preference_in_dist) > 0\n assert len(self.preference_out_dist) > 0\n\n self.objects_all = jax.random.permutation(self.rng, np.arange(self.num_features))\n\n @partial(jnp.vectorize, excluded=(0,), signature=\"(n)->(m)\")\n def k_hot(self, ind):\n \"\"\"\n Convert a vector of indeces to a k-hot vector.\n Repeating an index does not change the result.\n \"\"\"\n return (jnp.sum(jax.nn.one_hot(ind, self.num_preferences), axis=0) > 0) * 1.0\n\n @property\n def num_actions(self) -> int:\n return len(ACTIONS)\n\n @property\n def observation_shape(self) -> Tuple[int]:\n return (*self.grid.shape, self.num_features + 2)\n\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n # Copied from hyperteacher\n rng_tasks, rng_weights = jax.random.split(rng)\n if mode in [\"test\", \"train\", \"ood\"]:\n task_experts = self.preference_out_dist if mode == \"ood\" else self.preference_in_dist\n task_ids = jax.random.choice(rng_tasks, len(task_experts), shape=())\n embeddings = task_experts[task_ids]\n\n if mode == \"ood\":\n task_ids += len(self.preference_in_dist)\n elif \"ood_\" in mode:\n hotness = int(mode.split(\"_\")[1])\n if hotness <= self.num_hot:\n # Filter the existing task_experts_out_dist for the given hotness\n task_ids = jax.random.choice(\n key=rng_tasks,\n a=len(self.preference_out_dist),\n p=1.0 * jnp.all(\n jnp.sum(self.preference_out_dist, axis=-1) == hotness, axis=-1\n ),\n shape=(),\n )\n embeddings = self.preference_out_dist[task_ids]\n elif hotness <= self.num_preferences:\n # Randomly sample task_experts - everything is ood here\n expert_indeces = jax.random.choice(rng_tasks, self.num_preferences, replace=False, shape=(hotness, ))\n embeddings = self.k_hot(expert_indeces)\n task_ids = -1 * jnp.ones(()) # No unique task IDs available here\n else:\n raise ValueError(f\"Invalid hotness {hotness}\")\n\n if 
self.continuous_combinations:\n # Sample weights uniformly from simplex (see Willms, 2021)\n weights = jax.random.exponential(rng_weights, shape=embeddings.shape)\n weights = weights * embeddings\n weights = weights / (jnp.sum(weights, axis=-1, keepdims=True) + 1)\n\n # Shift nonzero embeddings to the range [0.5, 1.0] to prevent adding further sparsity\n embeddings = (0.5 * weights + 0.5) * embeddings\n\n return embeddings, {\"task_id\": task_ids, \"embedding\": embeddings[None, :]}\n\n @partial(jax.jit, static_argnums=(0))\n def reset(\n self, rng: PRNGKey, goal: Array = None\n ) -> Tuple[PreferenceState, EnvironmentInteraction]:\n \"\"\"Resets the environment to a random, initial state\"\"\"\n rng_preference, rng_distractor, rng_pos = jax.random.split(rng, 3)\n\n if goal is None:\n # Sample a preference from train distribution if None specified\n goal, _ = self.reset_goal(rng_preference, mode=\"train\")\n\n preference = goal\n\n # Sample distractors\n distractors = jax.random.choice(\n key=rng_distractor,\n a=self.objects_all,\n shape=(self.num_objects,),\n replace=True,\n )\n\n positions = jax.random.choice(\n rng_pos, self.free_coord, shape=(self.num_objects + 1,), replace=False\n )\n\n env_state = PreferenceState(\n done=False,\n timestep=0,\n positions=positions,\n features=distractors,\n available_distractors=jnp.ones((self.num_objects,)),\n preference=preference,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state), reward=0.0, done=False, timestep=0\n )\n return env_state, emission\n\n @partial(jax.jit, static_argnums=(0))\n def _step(\n self, rng: PRNGKey, env_state, action: Array\n ) -> Tuple[PreferenceState, EnvironmentInteraction]:\n pos_agent = env_state.positions[-1][0], env_state.positions[-1][1]\n distractors_pos = env_state.positions[:-1]\n features = env_state.features\n available_distractors = env_state.available_distractors\n\n preference = env_state.preference\n\n next_pos_agent, next_available_distractors, reward = self._move(\n pos_agent, features, available_distractors, distractors_pos, preference, action\n )\n next_timestep = env_state.timestep + 1\n # Update state\n env_state = PreferenceState(\n # If NOTHING is performed, the environment immediately terminates.\n done=jnp.logical_or(next_timestep > self.timelimit, action == ACTIONS.NOTHING.value),\n timestep=next_timestep,\n positions=env_state.positions.at[-1].set(next_pos_agent),\n features=env_state.features,\n available_distractors=next_available_distractors,\n preference=env_state.preference,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=reward,\n done=env_state.done,\n timestep=env_state.timestep,\n )\n\n return env_state, emission\n\n def observe(self, env_state: PreferenceState) -> Array:\n distractor_idx = env_state.features\n pos_objects, pos_agent = env_state.positions[0:-1, :], env_state.positions[-1, :]\n\n # Build the grid\n grid = jnp.zeros((*self.grid.shape, self.num_features + 2))\n\n grid = grid.at[\n (pos_objects[:, 0]),\n (pos_objects[:, 1]),\n distractor_idx,\n ].set(env_state.available_distractors)\n grid = grid.at[pos_agent[0], pos_agent[1], -2].set(\n 1.0\n ) # agent encoded in penultimate channel\n grid = grid.at[:, :, -1].set(self.grid) # walls encoded in last channel\n\n return grid\n\n def _idx_to_state(self, idx):\n grid_idx = idx // self.num_available_distractors_config\n distractor_config_idx = idx % self.num_available_distractors_config\n coord_packed = self.grid_idx_to_coord_matrix[grid_idx]\n coord = 
coord_packed // self.grid.shape[1], coord_packed % self.grid.shape[1]\n return coord, (((distractor_config_idx & (1 << np.arange(self.num_objects)))) > 0).astype(\n int\n )\n\n def _state_to_idx(self, coord, available_distractors):\n coord_packed = coord[0] * self.grid.shape[1] + coord[1]\n grid_idx = self.coord_matrix_to_grid_idx[coord_packed]\n distractor_config_idx = available_distractors @ (2 ** jnp.arange(self.num_objects))\n return (grid_idx * self.num_available_distractors_config + distractor_config_idx).astype(\n int\n )\n\n def _move(\n self, pos_agent, features, available_distractors, distractors_pos, preference, action\n ):\n delta_position = self._delta_position[action]\n next_position = pos_agent[0] + delta_position[0], pos_agent[1] + delta_position[1]\n # TODO(@simon): Remove boundary walls to save some input dim and check if within grid size bounds instead\n next_pos_grid = (\n jax.nn.one_hot(next_position[0], self.grid.shape[0])[..., None]\n * jax.nn.one_hot(next_position[1], self.grid.shape[1])[..., None].T\n )\n hit_wall = (self.grid * next_pos_grid).sum()\n next_position = jax.lax.cond(hit_wall, lambda _: pos_agent, lambda _: next_position, None)\n picked_distractor = (next_position[0] == distractors_pos[:, 0]) * (\n next_position[1] == distractors_pos[:, 1]\n )\n\n return (\n next_position,\n available_distractors * (1 - picked_distractor),\n (\n (picked_distractor * available_distractors)\n @ jax.nn.one_hot(features, self.num_features)\n @ self.preference_basis.T\n @ preference\n ),\n )\n\n @partial(jax.jit, static_argnums=(0))\n def demonstrate(self, rng, env_state):\n \"\"\"Given a state, compute the optimal trajectory to the goal.\"\"\"\n action_value_init = jnp.zeros((self.num_states, self.num_actions))\n\n def next_idx_and_reward(idx, action):\n coord, available_distractors = self._idx_to_state(idx)\n next_coord, next_available_feature, reward = self._move(\n coord,\n env_state.features,\n available_distractors,\n env_state.positions[:-1],\n env_state.preference,\n action,\n )\n next_idx = self._state_to_idx(next_coord, next_available_feature)\n # Return the maximum value\n return next_idx, reward\n\n transition_map, reward_map = jax.vmap(\n jax.vmap(next_idx_and_reward, in_axes=(None, 0)), in_axes=(0, None)\n )(jnp.arange(self.num_states), jnp.arange(self.num_actions))\n\n def bellman_backup(action_value, t):\n def next_value(idx, action):\n next_idx = transition_map[idx, action]\n reward = reward_map[idx, action]\n # Return the maximum value\n return self.discount * action_value[next_idx].max() + reward\n\n next_action_value = jax.vmap(\n jax.vmap(next_value, in_axes=(None, 0)), in_axes=(0, None)\n )(jnp.arange(self.num_states), jnp.arange(self.num_actions))\n return next_action_value, None\n\n action_value, _ = jax.lax.scan(\n bellman_backup, action_value_init, jnp.arange(self.timelimit)\n )\n\n def env_step(carry, t):\n rng, env_state = carry\n rng, rng_step = jax.random.split(rng)\n pos_agent = env_state.positions[-1]\n idx = self._state_to_idx(pos_agent, env_state.available_distractors)\n action = jnp.argmax(action_value[idx])\n env_state, emission = self.step(rng_step, env_state, action)\n return (rng, env_state), (emission, action_value[idx])\n\n (_, _), (trajectory, action_values) = jax.lax.scan(\n env_step, (rng, env_state), jnp.arange(self.timelimit)\n )\n\n # Append initial emission and remove last emission from trajectory\n initial_emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=0.0,\n done=False,\n timestep=0,\n )\n 
trajectory = jtu.tree_map(\n lambda x, y: jnp.concatenate((jnp.expand_dims(x, axis=0), y)),\n initial_emission,\n trajectory,\n )\n trajectory = jtu.tree_map(lambda x: x[:-1], trajectory)\n\n return trajectory, action_values"
},
{
"identifier": "Dataloader",
"path": "metax/data/base.py",
"snippet": "class Dataloader(abc.ABC):\n def __init__(self, input_shape: Tuple[int], output_dim: int):\n self.input_shape = input_shape\n self.output_dim = output_dim\n\n @abc.abstractproperty\n def __len__(self):\n pass\n\n @abc.abstractproperty\n def sample_input(self):\n # Sample input should include batch dimension\n pass\n\n @abc.abstractmethod\n def __iter__(self):\n pass"
},
{
"identifier": "MetaDataset",
"path": "metax/data/base.py",
"snippet": "class MetaDataset(NamedTuple):\n train: Union[Dataset, MultitaskDataset]\n test: Union[Dataset, MultitaskDataset]"
},
{
"identifier": "MultitaskDataset",
"path": "metax/data/base.py",
"snippet": "class MultitaskDataset(NamedTuple):\n x: Array\n y: Array\n task_id: Array\n info: Dict = dict()"
}
] | from functools import partial
from typing import Optional
from chex import PRNGKey
from metax.data.envs.base import Environment
from metax.data.envs.grid import CompositionalGrid
from metax.data.envs.preference import CompositionalPreference
from .base import Dataloader, MetaDataset, MultitaskDataset
import jax
import jax.numpy as jnp
import jax.tree_util as jtu | 10,258 | """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
| """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
| class ImitationMetaDataloader(Dataloader): | 3 | 2023-12-22 16:35:49+00:00 | 12k |
willfinnigan/RetroBioCat_2 | rbc2/mcts/mcts.py | [
{
"identifier": "MultiExpander",
"path": "rbc2/expansion/multi_expander.py",
"snippet": "class MultiExpander:\n\n def __init__(self,\n expanders: dict[str: Expander],\n network: Optional[Network] = None):\n\n if len(expanders) == 0:\n raise ValueError(\"No expanders provided\")\n\n self.expanders = expanders\n\n # check that all expanders have the same config\n expander_configs = [expander.config for expander in expanders.values()]\n if len(set(expander_configs)) != 1:\n raise ValueError(\"All expanders must have the same config instance\")\n\n # all expanders should have the same config, so just use the first one\n self.expander_config = list(self.expanders.values())[0].config\n\n # give all expanders the same network\n for expander in self.expanders.values():\n expander.network = network\n\n def get_options(self, smis_to_expand: List[str], combination_by: str = 'order_by_score') -> List[ReactionOption]:\n \"\"\" For multiple smiles, get the options from each expander and combine them using the combination method \"\"\"\n per_expander_options = []\n for name, expander in self.expanders.items():\n options = []\n for smi in smis_to_expand:\n if self.is_expander_blocked(smi, expander):\n continue\n options += expander.get_options(smi)\n\n options = sort_options_by_score(options)\n per_expander_options.append(options)\n\n combination_method = combination_methods[combination_by]\n options = combination_method(per_expander_options)\n return options\n\n def get_reactions(self, smis_to_expand: List[str]) -> List[Reaction]:\n options = self.get_options(smis_to_expand)\n reactions = []\n for opt in options:\n new_reactions = opt.evaluate()\n reactions.extend(new_reactions)\n return reactions\n\n def template_application_counts(self) -> dict:\n \"\"\" Return a dictionary of the number of times a template has been applied for each expander \"\"\"\n counts = {}\n for expander_name, expander in self.expanders.items():\n counts[expander_name] = expander.number_of_rule_applications()\n counts['total'] = sum([x for x in counts.values()])\n return counts\n\n def expander_calls(self) -> dict:\n \"\"\" Return a dictionary of the number of times each expander has been called \"\"\"\n counts = {}\n for expander_name, expander in self.expanders.items():\n counts[expander_name] = expander.number_of_calls()\n counts['total'] = sum([x for x in counts.values()])\n return counts\n\n def is_expander_blocked(self, smi: str, expander: Expander) -> bool:\n \"\"\" Return a list of blocked expanders \"\"\"\n if expander.rxn_domain == 'biocatalysis' or expander.rxn_domain == 'biosynthesis':\n if self.expander_config.use_max_mw_for_enzymes is True:\n if get_mw(smi) > self.expander_config.max_mw_to_use_enzymes:\n return True\n return False"
},
{
"identifier": "Filter",
"path": "rbc2/reaction_evaluation/feasability_filters.py",
"snippet": "RETROBIOCAT_FILTER = 'retrobiocat_filter'\nAIZYNTHFINDER_FILTER = 'aizynthfinder_feasability_filter'\ndef aizynthfinder_feasability(reaction: Reaction) -> float:\ndef retrobiocat_feasability(reaction: Reaction) -> float:"
},
{
"identifier": "DefaultSQLStartingMaterialEvaluator",
"path": "rbc2/reaction_evaluation/starting_material_evaluator/starting_material_evaluator.py",
"snippet": "class DefaultSQLStartingMaterialEvaluator(StartingMaterialEvaluatorInterface):\n vendor_urls = {'mcule': 'https://mcule.com/[[ID]]',\n 'sigma': 'https://www.sigmaaldrich.com/GB/en/search/[[ID]]?focus=products&page=1&perpage=30&sort=relevance&term=[[ID]]&type=product',\n 'lifechem': 'https://shop.lifechemicals.com/compound/[[ID]]',\n 'apollo': 'https://store.apolloscientific.co.uk/search?search=[[ID]]',\n 'alfa': 'https://www.alfa.com/en/catalog/[[ID]]',\n 'zinc': 'https://zinc.docking.org/substances/[[ID]]',\n 'flurochem': 'http://www.fluorochem.co.uk/Products/Product?code=[[ID]]',\n 'molport': 'https://www.molport.com/shop/molecule-link/[[ID]]',\n 'ecmdb': 'https://ecmdb.ca/compounds/[[ID]]'}\n\n available_modes = ['building_blocks', 'metabolites']\n\n def __init__(self, config: Optional[SourceMol_Config] = None, custom_smiles=None, blocked_smiles=None):\n\n if does_source_mols_db_exist() == False:\n download_source_mols_db()\n\n db_path = data_folder + '/source_mols.db'\n self.database = SQLite_Database(db_path)\n self.query = DB_Query_SQLite(self.database)\n self.cache_column_names = {}\n self.cache_vendor_names = {}\n self.config = config\n if self.config is None:\n self.config = SourceMol_Config()\n\n self.custom_smiles = custom_smiles\n if self.custom_smiles is None:\n self.custom_smiles = []\n\n self.blocked_smiles = blocked_smiles\n if self.blocked_smiles is None:\n self.blocked_smiles = []\n\n @lru_cache(maxsize=10000)\n def eval(self, smi):\n if smi in self.blocked_smiles:\n return False, {}\n if smi in self.custom_smiles:\n return True, {}\n\n mode, vendors = self.config.get_mode_and_vendors()\n\n if self.is_mol_chiral(smi) and self.config.source_mols_can_be_chiral is False:\n return False, {}\n\n result = self.query.smiles_lookup(smi, mode, vendors=vendors)\n if result is None:\n return False, {}\n\n info = self._process_info(result, mode)\n\n if self._is_above_max_price_per_gram(info, vendors) == True:\n return False, info\n return True, info\n\n def is_mol_chiral(self, smi):\n if '@' in smi:\n return True\n return False\n\n @lru_cache(maxsize=10)\n def column_names(self, mode):\n if mode not in self.cache_column_names:\n self.cache_column_names[mode] = self.query.get_column_names(mode)\n return self.cache_column_names[mode]\n\n @lru_cache(maxsize=10)\n def vendor_names(self, mode):\n if mode not in self.cache_vendor_names:\n columns = self.column_names(mode)\n vendors = []\n for col in columns:\n if '_id' in col:\n vendors.append(col.replace('_id', ''))\n self.cache_vendor_names[mode] = vendors\n return self.cache_vendor_names[mode]\n\n def _process_info(self, result, mode):\n columns = self.column_names(mode)\n vendors = self.vendor_names(mode)\n info = {k: v for k, v in zip(columns, result)}\n info = {k: v for k, v in info.items() if v is not None}\n\n vendor_info = {}\n for col, value in info.items():\n for vendor in vendors:\n if vendor in col:\n if vendor not in vendor_info:\n vendor_info[vendor] = {}\n vendor_info[vendor][col.replace(f\"{vendor}_\", '')] = value\n if ('_id' in col) and (vendor in self.vendor_urls):\n url = self.vendor_urls[vendor].replace('[[ID]]', value)\n vendor_info[vendor]['url'] = url\n return vendor_info\n\n def _is_above_max_price_per_gram(self, info, requested_vendors):\n \"\"\" Determines whether the price of the molecule is above the maximum price per gram, based on settings in the config \"\"\"\n\n if requested_vendors is None:\n return False\n\n price_too_high = [] # will become list of booleans, one for each vendor, for whether 
the price is too high\n for vendor in requested_vendors:\n if vendor in info:\n if 'ppg' in info[vendor]:\n if info[vendor]['ppg'] is None:\n price_too_high.append(False)\n elif float(info[vendor]['ppg']) > self.config.max_price_per_gram:\n price_too_high.append(True)\n else:\n price_too_high.append(False)\n else:\n price_too_high.append(False)\n\n\n # if there are only Trues in the list, return True\n if len(price_too_high) == sum(price_too_high):\n return True\n\n # if config.block_if_price_over_max is True, then return True if any of the prices are too high\n if self.config.block_if_price_over_max is True:\n if True in price_too_high:\n return True\n\n # otherwise return False\n return False"
},
{
"identifier": "add_logger",
"path": "rbc2/utils/add_logger.py",
"snippet": "def add_logger(name, level='DEBUG'):\n logger = logging.getLogger(name)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.propagate = False\n return logger"
},
{
"identifier": "logging_config",
"path": "rbc2/configs/logging_config.py",
"snippet": "class LoggingConfig():\n def __init__(self):\n def set_global_mode(self, global_mode):\n def set_mcts_loops_only(self):"
},
{
"identifier": "MCTS_Config",
"path": "rbc2/configs/mcts_config.py",
"snippet": "class MCTS_Config():\n\n def __init__(self):\n\n # mcts setting\n self.max_length = 4\n self.exploration = 1.414 # optimal exploration value for UCB1 is sqrt(2)=1.414 if score [0-1]\n self.max_search_time = 120\n self.max_iterations = None\n self.callback_iterations = 20 # number of iterations before the mcts callback function is called (if set)\n\n # mcts scoring\n self.use_reaction_scores_for_mcts_initial_values = True # recommended this is True, otherwise first round will be random\n self.score_mode = 'basic' # 'basic', 'complexity_penalty', 'mass_percent', ('number_of_atoms'-not implemented yet)\n self.use_pathway_length_score = True # also use a pathway length score (like aizynthfinder)\n\n # values for complexity penalty (if used)\n self.non_buyable_score = 0.2 # the default score for a non buyable compound\n self.max_complexity_penalty = -0.2 # the maximum penalty for a *complex* non buyable compound\n self.rel_complexity_no_penalty = 0 # complexity above this has no penalty\n self.rel_complexity_max_penalty = -1 # complexity below this has max penalty\n\n # multi_expansion options\n self.option_combination_method = 'order_by_score' # ['interleave, order_by_score']\n\n # expansion option scoring\n self.allow_moves_beyond_solved = 0 # number of moves beyond a solved node that are allowed, normally 0\n self.stop_expansion_if_nonbuyable_at_max_length = False # dont expand a mcts_node if a non buyable is found at max length (its impossible to solve)\n self.boost_enzyme_score_if_in_cascade = False\n self.boost_enzyme_in_cascade_score_by = 0.2\n\n\n\n self.max_chemistry_nodes = None\n\n self.chemistry_only_at_beginning_or_end = False # if true, only allow chemistry at beginning or end of pathway\n self.max_chemistry_at_beginning = None # if only allowing chemistry at beginning or end, optionally set a max number of chemistry nodes at beginning\n self.max_chemistry_at_end = None # if only allowing chemistry at beginning or end, optionally set a max number of chemistry nodes at end\n\n # expansion node evaluation\n self.avoid_blocked_reactions = True\n self.blocked_reactions = []\n\n self.merge_reactions_from_same_domain = False\n\n self.chemistry_filter = AIZYNTHFINDER_FILTER\n self.chemistry_filter_cutoff = 0.05\n self.biocatalysis_filter = 'None' # RETROBIOCAT_FILTER\n\n\n def update_from_dict(self, attr_dict):\n current_dict = self.to_dict()\n for key, value in attr_dict.items():\n if key in current_dict:\n setattr(self, key, value)\n return self\n\n def to_dict(self):\n return self.__dict__"
},
{
"identifier": "get_expanders",
"path": "rbc2/expansion/expander_repository.py",
"snippet": "def get_expanders(expander_names: Sequence[str],\n network: Optional[Network] = None,\n expander_config: Optional[Expansion_Config] = None) -> dict[str: Expander]:\n \"\"\" Get a dictionary of expanders from a list of names \"\"\"\n\n # if not expansion config specified, use default\n if expander_config is None:\n expander_config = Expansion_Config()\n\n # get expanders\n expanders = {}\n for name in expander_names:\n if name not in expander_repo:\n raise ValueError(f'Expander {name} not found in repository')\n expanders[name] = expander_repo[name](config=expander_config,\n network=network)\n\n return expanders"
},
{
"identifier": "Expander",
"path": "rbc2/expansion/default_expander_interface.py",
"snippet": "class Expander(ABC):\n \"\"\"The expander interface, which defines the methods that an expander must implement\"\"\"\n\n @abstractmethod\n def __init__(self,\n network: Optional[Network] = None,\n config: Optional[Expansion_Config] = None):\n self.network = network\n self.rxn_type = ''\n self.rxn_domain = ''\n self.config = config\n\n @abstractmethod\n def get_options(self, smi: str) -> List[ReactionOption]:\n pass\n\n @abstractmethod\n def create_option(self, smi: str, name: str, smarts: List[str],\n template_metadata: dict, score: float) -> ReactionOption:\n pass\n\n @abstractmethod\n def get_reactions(self, smi: str) -> List[Reaction]:\n pass\n\n @abstractmethod\n def number_of_rule_applications(self) -> int:\n pass\n\n @abstractmethod\n def number_of_calls(self) -> int:\n pass"
},
{
"identifier": "backpropogate",
"path": "rbc2/mcts/mcts_loop/backpropogate.py",
"snippet": "def backpropogate(node: MCTS_Node, score: float) -> List[MCTS_Node]:\n \"\"\"\n Backpropogate the score up the tree to the root.\n In the process, collect any new solved nodes and return these\n \"\"\"\n\n if node is None:\n return []\n\n new_solved_nodes = []\n while node is not None:\n node.value += score\n node.visits += 1\n if node.visits == 2 and node.solved == True: # on its first visit (starts with a 1, and we just added 1), if it is solved, add to solved nodes\n new_solved_nodes.append(node)\n node = node.parent\n\n new_solved_nodes.reverse() # shorter pathways are solved first\n return new_solved_nodes"
},
{
"identifier": "Expansion",
"path": "rbc2/mcts/mcts_loop/expansion/expand.py",
"snippet": "class Expansion():\n\n def __init__(self,\n multi_expander: MultiExpander,\n starting_material_evaluator: StartingMaterialEvaluator,\n mcts_config: MCTS_Config\n ):\n self.multi_expander = multi_expander\n self.starting_material_evaluator = starting_material_evaluator\n self.mcts_config = mcts_config\n\n def expand(self, node: MCTS_Node) -> List[MCTS_Node]:\n return expand(node, self.multi_expander, self.starting_material_evaluator, self.mcts_config)"
},
{
"identifier": "rollout",
"path": "rbc2/mcts/mcts_loop/rollout.py",
"snippet": "def rollout(node: MCTS_Node,\n expansion: Expansion,\n selection: Selection,\n network: Network,\n filters: dict[str: Filter],\n mcts_config: MCTS_Config) -> Optional[MCTS_Node]:\n \"\"\"\n 1. is node terminal\n 2. if not, expand the node\n 3. selection to go to next node\n 4. if node needs evaluating then do this\n 5. repeat\n \"\"\"\n\n if node is None:\n rollout_logger.debug(f'No rollout because node is None')\n return None\n\n start_depth = node.depth\n\n while node.terminal is False and node.fully_searched is False:\n if node.is_evaluated() is False:\n rollout_logger.debug(f'Evaluating node at depth {node.depth}')\n node = resolve_unevaluated_mcts_node(node, network, filters, mcts_config)\n\n if node.expanded is False:\n expansion.expand(node)\n\n node = selection.select(node, mcts_config.exploration)\n\n if node is None:\n return None\n\n rollout_logger.debug(f'Rollout from depth {start_depth} to depth {node.depth}')\n return node"
},
{
"identifier": "score_node",
"path": "rbc2/mcts/mcts_loop/score_node.py",
"snippet": "def score_node(node: MCTS_Node,\n mcts_config: MCTS_Config,\n starting_material_evaluator: StartingMaterialEvaluatorInterface):\n if node is None:\n return 0\n return score_pathway(node.pathway, mcts_config, starting_material_evaluator)"
},
{
"identifier": "Selection",
"path": "rbc2/mcts/mcts_loop/selection.py",
"snippet": "class Selection():\n\n def __init__(self):\n self.metrics = {}\n\n def select(self, node: MCTS_Node, exploration: float) -> Optional[MCTS_Node]:\n node = selection(node, exploration)\n if node is not None:\n if node.option is not None:\n if node.option.rxn_type not in self.metrics:\n self.metrics[node.option.rxn_type] = 0\n self.metrics[node.option.rxn_type] += 1\n return node"
},
{
"identifier": "create_root",
"path": "rbc2/mcts/tree_node.py",
"snippet": "def create_root(target_smi: str) -> MCTS_Node:\n pathway = Pathway([], target_smi=target_smi)\n root = MCTS_Node(pathway=pathway, is_root=True)\n return root"
},
{
"identifier": "MCTS_Node",
"path": "rbc2/mcts/tree_node.py",
"snippet": "class MCTS_Node():\n\n parent: Optional[MCTS_Node] = None\n pathway: Optional[Pathway] = None\n option: Optional[ReactionOption] = None\n\n terminal: bool = False\n children: list = field(default_factory=list)\n visits: int = 1\n value: float = 0\n solved: bool = False\n fully_searched: bool = False\n expanded: bool = False\n depth: int = 0\n is_root: bool = False\n\n def __post_init__(self):\n self.id = str(uuid.uuid4())\n\n def __hash__(self):\n return hash(self.id)\n\n def is_evaluated(self):\n if self.pathway is None and self.option is None:\n raise Exception(\"MCTS must either have a pathway (evaluated), or an option and a parent (non_evaluated)\")\n if self.option is not None:\n if self.parent is None:\n raise Exception(\"If node is initialised with a ReactionOption, it must have a parent node\")\n elif self.parent.pathway is None:\n raise Exception(\"If node is initialised with a ReactionOption, it's parent node must have a pathway\")\n\n return self.pathway is not None\n\n def get_last_rxn_type(self):\n if self.option is not None:\n return self.option.rxn_type\n if self.pathway is not None:\n if len(self.pathway.reactions) > 0:\n rxn_type = self.pathway.reactions[-1].rxn_type\n return rxn_type\n elif self.is_root == False:\n raise Exception(\"Pathway has no reactions, but is not root\")\n return None"
},
{
"identifier": "Network",
"path": "rbc2/reaction_network_entities/network.py",
"snippet": "class Network():\n \"\"\" Network is used to keep a record of the outcome of all expansions.\"\"\"\n\n def __init__(self, reactions: Sequence[Reaction] = ()):\n\n self.smi_produced_by: dict[Smi: Set[Reaction]] = defaultdict(set)\n self.smi_substrate_of: dict[Smi: Set[Reaction]] = defaultdict(set)\n self.reaction_options: dict[Smi: dict[ExpanderID: List[ReactionOption]]] = defaultdict(lambda: defaultdict(dict))\n self.reactions: Set[Reaction] = set()\n\n if len(reactions) != 0:\n for rxn in reactions:\n self.add_reaction(rxn)\n\n def add_reaction(self, reaction: Reaction):\n self.reactions.add(reaction)\n self.smi_produced_by[reaction.product].add(reaction)\n for smi in reaction.substrates:\n self.smi_substrate_of[smi].add(reaction)\n\n def remove_reaction(self, reaction: Reaction):\n self.reactions.discard(reaction)\n self.smi_produced_by[reaction.product].discard(reaction)\n for smi in reaction.substrates:\n self.smi_substrate_of[smi].discard(reaction)\n\n def add_option(self, option: ReactionOption):\n self.reaction_options[option.target_smi][option.rxn_type][option.unique_id] = option\n\n def bulk_add_options(self, smi: Smi, rxn_type: RxnType, list_options: List[ReactionOption]):\n self.reaction_options[smi][rxn_type] = {option.unique_id: option for option in list_options}\n\n def remove_option(self, option: ReactionOption):\n self.reaction_options[option.target_smi][option.rxn_type].pop(option.unique_id, None)\n\n def get_reaction_options(self, smi: Smi, rxn_type: RxnType) -> list[ReactionOption]:\n options_for_smi = self.reaction_options.get(smi, {})\n options_for_rxn_type = options_for_smi.get(rxn_type, {})\n return list(options_for_rxn_type.values())\n\n def are_options_available(self, smi: Smi, rxn_type: RxnType) -> bool:\n return self.reaction_options.get(smi, {}).get(rxn_type, False) is not False\n\n def get_reactions_which_molecule_is_produced_by(self, smi: Smi) -> Set[Reaction]:\n return self.smi_produced_by.get(smi, set())\n\n def get_reactions_which_molecule_is_substrate_of(self, smi: Smi) -> Set[Reaction]:\n return self.smi_substrate_of.get(smi, set())\n\n def all_smis(self) -> Set[Smi]:\n all_smis = set(self.smi_produced_by.keys())\n all_smis.update(set(self.smi_substrate_of.keys()))\n return all_smis\n\n def all_reactions(self) -> List[Reaction]:\n return list(self.reactions)\n\n def all_reaction_options(self) -> List[ReactionOption]:\n all_options = []\n for smi, rxn_type_options in self.reaction_options.items():\n for rxn_type, options_dict in rxn_type_options.items():\n for option_id, option in options_dict.items():\n all_options.append(option)\n return all_options\n\n def save(self):\n \"\"\"Save the network to a dict\"\"\"\n data = {\"reactions\": reactions_to_dicts(self.all_reactions()),\n \"reaction_options\": [option_to_dict(opt) for opt in self.all_reaction_options()]}\n return data\n\n def load(self, data: dict, expanders: List[Expander]):\n \"\"\"\n Load the network from data dict\n ReactionOptions will only be loaded if the relevant expander is provided\n \"\"\"\n\n # check each expander is associated with this network\n for expander in expanders:\n if expander.network != self:\n raise Exception(\"Can not load reaction options when expander is not associated with the same network\")\n\n # load reactions\n reaction_unique_id_dict = {}\n for reaction_dict in data['reactions']:\n reaction = reaction_from_dict(reaction_dict)\n reaction_unique_id_dict[reaction.unique_id] = reaction\n self.add_reaction(reaction)\n\n # load reaction options\n expander_dict 
= {exp.rxn_type: exp for exp in expanders}\n for opt_dict in data['reaction_options']:\n rxn_type = opt_dict['rxn_type']\n expander = expander_dict.get(rxn_type, None)\n if expander is None:\n continue\n\n option = option_from_dict(opt_dict, expander)\n\n # add reactions from ids\n for unique_id in opt_dict.get('reaction_ids', []):\n reaction = reaction_unique_id_dict.get(unique_id, None)\n if reaction is None:\n continue\n option.reactions.append(reaction)\n\n self.add_option(option)\n\n\n def get_pa_route(self, start_smi, starting_material_evaluator: StartingMaterialEvaluatorInterface):\n def get_smi_produced_by(smi):\n return list(self.smi_produced_by[smi])\n return get_pa_route(start_smi, starting_material_evaluator, get_smi_produced_by)"
},
{
"identifier": "Pathway",
"path": "rbc2/reaction_network_entities/pathway.py",
"snippet": "class Pathway:\n\n def __init__(self, reactions: List[Reaction], target_smi: Optional[str] = None):\n self.reactions = reactions\n\n self.smi_produced_by = defaultdict(set)\n self.smi_substrate_of = defaultdict(set)\n\n for reaction in self.reactions:\n self.smi_produced_by[reaction.product].add(reaction)\n for smi in reaction.substrates:\n self.smi_substrate_of[smi].add(reaction)\n\n self.product_smis = set(self.smi_produced_by.keys())\n self.substrate_smis = set(self.smi_substrate_of.keys())\n self.all_smis = self.product_smis | self.substrate_smis\n\n if target_smi is not None:\n self.target_smi = target_smi\n self.all_smis.add(self.target_smi)\n else:\n self.target_smi = self._get_target_smi()\n\n self.pathway_length = 0\n self.end_smi_depths: dict[str: int] = {}\n self.tree = self._make_tree(self.target_smi)\n\n def _get_target_smi(self):\n target_smis = [smi for smi in self.product_smis if smi not in self.substrate_smis]\n if len(target_smis) > 1:\n raise Exception('Pathway has multiple targets')\n elif len(target_smis) == 0:\n raise Exception('Pathway has no target')\n return target_smis[0]\n\n def _make_tree(self, smi: str, depth=0) -> dict:\n if self.pathway_length < depth:\n self.pathway_length = depth\n\n tree = {'smiles': smi, 'depth': depth, 'children': []}\n for reaction in self.smi_produced_by[smi]:\n for child_smi in reaction.substrates:\n tree['children'].append(self._make_tree(child_smi, depth=depth+1))\n\n if len(self.smi_produced_by[smi]) == 0:\n self.end_smi_depths[smi] = depth\n\n return tree\n\n def get_pa_route(self, starting_material_evaluator: StartingMaterialEvaluatorInterface):\n def get_smi_produced_by(smi):\n return list(self.smi_produced_by[smi])\n\n return get_pa_route(self.target_smi, starting_material_evaluator, get_smi_produced_by)\n\n def get_smi_producted_by(self, smi: str) -> Reaction:\n reactions = self.smi_produced_by[smi]\n if len(reactions) != 1:\n raise Exception(f'smi {smi} produced by multiple reactions')\n return list(reactions)[0]\n\n def end_smis(self):\n return list(self.end_smi_depths.keys())\n\n def save(self):\n \"\"\"Returns a list of dicts containing the reactions in the pathway\"\"\"\n return [asdict(reaction) for reaction in self.reactions]\n\n def get_reaction_with_product(self, smi: str) -> Optional[Reaction]:\n reactions = self.smi_produced_by[smi]\n if len(reactions) == 0:\n return None\n\n if len(reactions) != 1:\n pathway_logger.warning(f'smi {smi} produced by multiple reactions')\n\n return list(reactions)[0]\n\n def get_reaction_with_substrate(self, smi: str) -> Optional[Reaction]:\n reactions = self.smi_substrate_of[smi]\n\n if len(reactions) == 0:\n return None\n\n if len(reactions) != 1:\n pathway_logger.warning(f'smi {smi} substrate of multiple reactions')\n\n return list(reactions)[0]"
}
] | import time
from typing import Optional, List
from rbc2.expansion.multi_expander import MultiExpander
from rbc2.reaction_evaluation.feasability_filters import Filter, default_filter_repo
from rbc2.reaction_evaluation.starting_material_evaluator.starting_material_evaluator import \
DefaultSQLStartingMaterialEvaluator
from rbc2.utils.add_logger import add_logger
from rbc2.configs.logging_config import logging_config
from rbc2.configs.mcts_config import MCTS_Config
from rbc2.expansion.expander_repository import get_expanders
from rbc2.expansion.default_expander_interface import Expander
from rbc2.mcts.mcts_loop.backpropogate import backpropogate
from rbc2.mcts.mcts_loop.expansion.expand import Expansion
from rbc2.mcts.mcts_loop.rollout import rollout
from rbc2.mcts.mcts_loop.score_node import score_node
from rbc2.mcts.mcts_loop.selection import Selection
from rbc2.mcts.tree_node import create_root, MCTS_Node
from rbc2.reaction_network_entities.network import Network
from rbc2.reaction_network_entities.pathway import Pathway | 7,824 |
class MCTS():
def __init__(self,
target_smi: str,
expanders: dict[str: Expander],
filters: dict[str: Filter] = default_filter_repo,
starting_material_evaluator: Optional[DefaultSQLStartingMaterialEvaluator] = None,
network: Optional[Network] = None,
mcts_config: Optional[MCTS_Config] = None):
self.target_smi = target_smi
self.logger = add_logger('MCTS', level=logging_config.mcts)
# config
self.mcts_config = mcts_config
if self.mcts_config is None:
self.mcts_config = MCTS_Config()
# starting material evaluator
self.starting_material_evaluator = starting_material_evaluator
if self.starting_material_evaluator is None:
self.starting_material_evaluator = DefaultSQLStartingMaterialEvaluator()
# network - used to save expansions so they are only done once
self.network = network
if self.network is None:
self.network = Network()
# multi_expander made up of the individual expanders
self.multi_expander = MultiExpander(expanders, network=self.network)
# filters
self.filters = filters
# mcts steps
self.selection = Selection()
self.expansion = Expansion(self.multi_expander,
self.starting_material_evaluator,
self.mcts_config)
self.root: MCTS_Node = create_root(target_smi) # root node
self.solved = [] # the solved nodes, updated during the backpropagation step
self.search_complete = False # used to stop the search either on max iterations or max run time
# stats
self.iterations = 0
self.run_time = 0
self.positive_backpropagations = 0
def run(self, callback=None):
"""Runs the MCTS search"""
self.logger.debug(f'Running MCTS search for {self.target_smi}. Max time: {self.mcts_config.max_search_time} seconds. Max iterations: {self.mcts_config.max_iterations}')
t0 = time.time()
while self.search_complete is False:
self.do_a_loop()
self._check_run_time(t0)
if callback is not None and self.iterations % self.mcts_config.callback_iterations == 0:
callback(self)
def do_a_loop(self):
self.logger.debug(f'---- ITERATION {self.iterations} ----')
node = self.selection.select(self.root, self.mcts_config.exploration)
|
class MCTS():
def __init__(self,
target_smi: str,
expanders: dict[str: Expander],
filters: dict[str: Filter] = default_filter_repo,
starting_material_evaluator: Optional[DefaultSQLStartingMaterialEvaluator] = None,
network: Optional[Network] = None,
mcts_config: Optional[MCTS_Config] = None):
self.target_smi = target_smi
self.logger = add_logger('MCTS', level=logging_config.mcts)
# config
self.mcts_config = mcts_config
if self.mcts_config is None:
self.mcts_config = MCTS_Config()
# starting material evaluator
self.starting_material_evaluator = starting_material_evaluator
if self.starting_material_evaluator is None:
self.starting_material_evaluator = DefaultSQLStartingMaterialEvaluator()
# network - used to save expansions so they are only done once
self.network = network
if self.network is None:
self.network = Network()
# multi_expander made up of the individual expanders
self.multi_expander = MultiExpander(expanders, network=self.network)
# filters
self.filters = filters
# mcts steps
self.selection = Selection()
self.expansion = Expansion(self.multi_expander,
self.starting_material_evaluator,
self.mcts_config)
self.root: MCTS_Node = create_root(target_smi) # root node
self.solved = [] # the solved nodes, updated during the backpropagation step
self.search_complete = False # used to stop the search either on max iterations or max run time
# stats
self.iterations = 0
self.run_time = 0
self.positive_backpropagations = 0
def run(self, callback=None):
"""Runs the MCTS search"""
self.logger.debug(f'Running MCTS search for {self.target_smi}. Max time: {self.mcts_config.max_search_time} seconds. Max iterations: {self.mcts_config.max_iterations}')
t0 = time.time()
while self.search_complete is False:
self.do_a_loop()
self._check_run_time(t0)
if callback is not None and self.iterations % self.mcts_config.callback_iterations == 0:
callback(self)
def do_a_loop(self):
self.logger.debug(f'---- ITERATION {self.iterations} ----')
node = self.selection.select(self.root, self.mcts_config.exploration) | new_node = rollout(node, self.expansion, self.selection, self.network, self.filters, self.mcts_config) | 10 | 2023-12-30 11:33:41+00:00 | 12k |
DerwenAI/textgraphs | textgraphs/doc.py | [
{
"identifier": "PAGERANK_ALPHA",
"path": "textgraphs/defaults.py",
"snippet": "PAGERANK_ALPHA: float = 0.85"
},
{
"identifier": "Edge",
"path": "textgraphs/elem.py",
"snippet": "class Edge:\n \"\"\"\nA data class representing an edge between two nodes.\n \"\"\"\n src_node: int\n dst_node: int\n kind: RelEnum\n rel: str\n prob: float\n count: int = 1"
},
{
"identifier": "Node",
"path": "textgraphs/elem.py",
"snippet": "class Node: # pylint: disable=R0902\n \"\"\"\nA data class representing one node, i.e., an extracted phrase.\n \"\"\"\n node_id: int\n key: str\n span: typing.Union[ spacy.tokens.span.Span, spacy.tokens.token.Token ]\n text: str\n pos: str\n kind: NodeEnum\n loc: typing.List[ typing.List[ int ] ] = field(default_factory = lambda: [])\n label: typing.Optional[ str ] = None\n length: int = 1\n sub_obj: bool = False\n count: int = 0\n neighbors: int = 0\n weight: float = 0.0\n entity: typing.List[ LinkedEntity ] = field(default_factory = lambda: [])\n annotated: bool = False\n\n\n def get_linked_label (\n self\n ) -> typing.Optional[ str ]:\n \"\"\"\nWhen this node has a linked entity, return that IRI.\nOtherwise return its `label` value.\n\n returns:\na label for the linked entity\n \"\"\"\n if len(self.entity) > 0:\n return self.entity[0].iri\n\n return self.label\n\n\n def get_name (\n self\n ) -> str:\n \"\"\"\nReturn a brief name for the graphical depiction of this Node.\n\n returns:\nbrief label to be used in a graph\n \"\"\"\n if self.kind == NodeEnum.IRI:\n return self.label # type: ignore\n if self.kind == NodeEnum.LEM:\n return self.key\n\n return self.text\n\n\n def get_stacked_count (\n self\n ) -> int:\n \"\"\"\nReturn a modified count, to redact verbs and linked entities from\nthe stack-rank partitions.\n\n returns:\ncount, used for re-ranking extracted entities\n \"\"\"\n if self.pos == \"VERB\" or self.kind == NodeEnum.IRI:\n return 0\n\n return self.count\n\n\n def get_pos (\n self\n ) -> typing.Tuple[ int, int ]:\n \"\"\"\nGenerate a position span for `OpenNRE`.\n\n returns:\na position span needed for `OpenNRE` relation extraction\n \"\"\"\n position: typing.Tuple[ int, int ] = ( self.span.idx, self.span.idx + len(self.text) - 1, )\n return position"
},
{
"identifier": "NodeEnum",
"path": "textgraphs/elem.py",
"snippet": "class NodeEnum (enum.IntEnum):\n \"\"\"\nEnumeration for the kinds of node categories\n \"\"\"\n DEP = 0 # `spaCy` parse dependency\n LEM = 1 # lemmatized token\n ENT = 2 # named entity\n CHU = 3 # noun chunk\n IRI = 4 # IRI for linked entity\n\n def __str__ (\n self\n ) -> str:\n \"\"\"\nCodec for representing as a string.\n\n returns:\ndecoded string representation of the enumerated value\n \"\"\"\n decoder: typing.List[ str ] = [\n \"dep\",\n \"lem\",\n \"ent\",\n \"chu\",\n \"iri\",\n ]\n\n return decoder[self.value]"
},
{
"identifier": "RelEnum",
"path": "textgraphs/elem.py",
"snippet": "class RelEnum (enum.IntEnum):\n \"\"\"\nEnumeration for the kinds of edge relations\n \"\"\"\n DEP = 0 # `spaCy` parse dependency\n CHU = 1 # `spaCy` noun chunk\n INF = 2 # `REBEL` or `OpenNRE` inferred relation\n SYN = 3 # `sense2vec` inferred synonym\n IRI = 4 # `DBPedia` or `Wikidata` linked entity\n\n def __str__ (\n self\n ) -> str:\n \"\"\"\nCodec for representing as a string.\n\n returns:\ndecoded string representation of the enumerated value\n \"\"\"\n decoder: typing.List[ str ] = [\n \"dep\",\n \"inf\",\n \"syn\",\n \"chu\",\n \"iri\",\n ]\n\n return decoder[self.value]"
},
{
"identifier": "SimpleGraph",
"path": "textgraphs/graph.py",
"snippet": "class SimpleGraph:\n \"\"\"\nAn in-memory graph used to build a `MultiDiGraph` in NetworkX.\n \"\"\"\n\n def __init__ (\n self\n ) -> None:\n \"\"\"\nConstructor.\n \"\"\"\n self.nodes: typing.Dict[ str, Node ] = OrderedDict()\n self.edges: typing.Dict[ str, Edge ] = {}\n self.lemma_graph: nx.MultiDiGraph = nx.MultiDiGraph()\n\n\n def reset (\n self\n ) -> None:\n \"\"\"\nRe-initialize the data structures, resetting all but the configuration.\n \"\"\"\n self.nodes = OrderedDict()\n self.edges = {}\n self.lemma_graph = nx.MultiDiGraph()\n\n\n def make_node ( # pylint: disable=R0913,R0914\n self,\n tokens: typing.List[ Node ],\n key: str,\n span: spacy.tokens.token.Token,\n kind: NodeEnum,\n text_id: int,\n para_id: int,\n sent_id: int,\n *,\n label: typing.Optional[ str ] = None,\n length: int = 1,\n linked: bool = True,\n ) -> Node:\n \"\"\"\nLookup and return a `Node` object.\nBy default, link matching keys into the same node.\nOtherwise instantiate a new node if it does not exist already.\n\n tokens:\nlist of parsed tokens\n\n key:\nlemma key (invariant)\n\n span:\ntoken span for the parsed entity\n\n kind:\nthe kind of this `Node` object\n\n text_id:\ntext (top-level document) identifier\n\n para_id:\nparagraph identitifer\n\n sent_id:\nsentence identifier\n\n label:\nnode label (for a new object)\n\n length:\nlength of token span\n\n linked:\nflag for whether this links to an entity\n\n returns:\nthe constructed `Node` object\n \"\"\"\n token_id: int = 0\n token_text: str = key\n token_pos: str = \"PROPN\"\n\n if span is not None:\n token_id = span.i\n token_text = span.text\n token_pos = span.pos_\n\n location: typing.List[ int ] = [ # type: ignore\n text_id,\n para_id,\n sent_id,\n token_id,\n ]\n\n if not linked:\n # construct a placeholder node (stopwords)\n self.nodes[key] = Node(\n len(self.nodes),\n key,\n span,\n span.text,\n span.pos_,\n kind,\n loc = [ location ],\n length = length,\n )\n\n elif key in self.nodes:\n # link to previously constructed entity node\n self.nodes[key].loc.append(location)\n self.nodes[key].count += 1\n\n # construct a new node for entity or lemma\n else:\n self.nodes[key] = Node(\n len(self.nodes),\n key,\n span,\n token_text,\n token_pos,\n kind,\n loc = [ location ],\n label = label,\n length = length,\n count = 1,\n )\n\n node: Node = self.nodes.get(key) # type: ignore\n\n if kind not in [ NodeEnum.CHU, NodeEnum.IRI ]:\n tokens.append(node)\n\n return node # type: ignore\n\n\n def make_edge ( # pylint: disable=R0913\n self,\n src_node: Node,\n dst_node: Node,\n kind: RelEnum,\n rel: str,\n prob: float,\n *,\n debug: bool = False,\n ) -> typing.Optional[ Edge ]:\n \"\"\"\nLookup an edge, creating a new one if it does not exist already,\nand increment the count if it does.\n\n src_node:\nsource node in the triple\n\n dst_node:\ndestination node in the triple\n\n kind:\nthe kind of this `Edge` object\n\n rel:\nrelation label\n\n prob:\nprobability of this `Edge` within the graph\n\n debug:\ndebugging flag\n\n returns:\nthe constructed `Edge` object; this may be `None` if the input parameters indicate skipping the edge\n \"\"\"\n key: str = \".\".join([\n str(src_node.node_id),\n str(dst_node.node_id),\n rel.replace(\" \", \"_\"),\n str(kind.value),\n ])\n\n if debug:\n ic(key)\n\n if key in self.edges:\n self.edges[key].count += 1\n\n elif src_node.node_id != dst_node.node_id:\n # preclude cycles in the graph\n self.edges[key] = Edge(\n src_node.node_id,\n dst_node.node_id,\n kind,\n rel,\n prob,\n )\n\n if debug:\n 
ic(self.edges.get(key))\n\n return self.edges.get(key)\n\n\n def construct_lemma_graph (\n self,\n *,\n debug: bool = False,\n ) -> None:\n \"\"\"\nConstruct the base level of the _lemma graph_ from the collected\nelements. This gets represented in `NetworkX` as a directed graph\nwith parallel edges.\n\n debug:\ndebugging flag\n \"\"\"\n # add the nodes\n self.lemma_graph.add_nodes_from([\n node.node_id\n for node in self.nodes.values()\n ])\n\n # populate the minimum required node properties\n for node_key, node in self.nodes.items():\n nx_node = self.lemma_graph.nodes[node.node_id]\n nx_node[\"title\"] = node_key\n nx_node[\"size\"] = node.count\n nx_node[\"value\"] = node.weight\n\n if debug:\n ic(nx_node)\n\n # add the edges and their properties\n self.lemma_graph.add_edges_from([\n (\n edge.src_node,\n edge.dst_node,\n {\n \"kind\": str(edge.kind),\n \"title\": edge.rel,\n \"weight\": float(edge.count),\n \"prob\": edge.prob,\n \"count\": edge.count,\n },\n )\n for edge_key, edge in self.edges.items()\n ])\n\n\n def dump_lemma_graph (\n self,\n ) -> str:\n \"\"\"\nDump the _lemma graph_ as a JSON string in _node-link_ format,\nsuitable for serialization and subsequent use in JavaScript,\nNeo4j, Graphistry, etc.\n\nMake sure to call beforehand: `TextGraphs.calc_phrase_ranks()`\n\n returns:\na JSON representation of the exported _lemma graph_\n \"\"\"\n # populate the optional node properties\n for node in self.nodes.values():\n nx_node = self.lemma_graph.nodes[node.node_id]\n nx_node[\"name\"] = node.text\n nx_node[\"kind\"] = str(node.kind)\n nx_node[\"iri\"] = node.label\n nx_node[\"subobj\"] = node.sub_obj\n nx_node[\"pos\"] = node.pos\n nx_node[\"loc\"] = str(node.loc)\n\n return json.dumps(\n nx.node_link_data(self.lemma_graph),\n sort_keys = True,\n indent = 2,\n separators = ( \",\", \":\" ),\n )"
},
{
"identifier": "Pipeline",
"path": "textgraphs/pipe.py",
"snippet": "class Pipeline: # pylint: disable=R0902,R0903\n \"\"\"\nManage parsing of a document, which is assumed to be paragraph-sized.\n \"\"\"\n\n def __init__ ( # pylint: disable=R0913\n self,\n text_input: str,\n tok_pipe: spacy.Language,\n ner_pipe: spacy.Language,\n aux_pipe: spacy.Language,\n kg: KnowledgeGraph, # pylint: disable=C0103\n infer_rels: typing.List[ InferRel ],\n ) -> None:\n \"\"\"\nConstructor.\n\n text_input:\nraw text to be parsed\n\n tok_pipe:\nthe `spaCy.Language` pipeline used for tallying individual tokens\n\n ner_pipe:\nthe `spaCy.Language` pipeline used for tallying named entities\n\n aux_pipe:\nthe `spaCy.Language` pipeline used for auxiliary components (e.g., `DBPedia Spotlight`)\n\n kg:\nknowledge graph used for entity linking\n\n infer_rels:\na list of components for inferring relations\n \"\"\"\n self.text: str = text_input\n\n # `tok_doc` provides a stream of individual tokens\n self.tok_doc: spacy.tokens.Doc = tok_pipe(self.text)\n\n # `ner_doc` provides the merged-entity spans from NER\n self.ner_doc: spacy.tokens.Doc = ner_pipe(self.text)\n\n # `aux_doc` e.g., span re-indexing for Spotlight entity linking\n self.aux_doc: spacy.tokens.Doc = aux_pipe(self.text)\n\n self.kg: KnowledgeGraph = kg # pylint: disable=C0103\n self.infer_rels: typing.List[ InferRel ] = infer_rels\n\n # list of Node objects for each parsed token, in sequence\n self.tokens: typing.List[ Node ] = []\n\n # set of Edge objects generated by this Pipeline\n self.edges: typing.List[ Edge ] = []\n\n\n @classmethod\n def get_lemma_key (\n cls,\n span: typing.Union[ spacy.tokens.span.Span, spacy.tokens.token.Token ],\n *,\n placeholder: bool = False,\n ) -> str:\n \"\"\"\nCompose a unique, invariant lemma key for the given span.\n\n span:\nspan of tokens within the lemma\n\n placeholder:\nflag for whether to create a placeholder\n\n returns:\na composed lemma key\n \"\"\"\n if isinstance(span, spacy.tokens.token.Token):\n terms: typing.List[ str ] = [\n span.lemma_.strip().lower(),\n span.pos_,\n ]\n\n if placeholder:\n terms.insert(0, str(span.i))\n\n else:\n terms = functools.reduce(\n operator.iconcat,\n [\n [ token.lemma_.strip().lower(), token.pos_, ]\n for token in span\n ],\n [],\n )\n\n return \".\".join(terms)\n\n\n def get_ent_lemma_keys (\n self,\n ) -> typing.Iterator[ typing.Tuple[ str, int ]]:\n \"\"\"\nIterate through the fully qualified lemma keys for an extracted entity.\n\n yields:\nthe lemma keys within an extracted entity\n \"\"\"\n for ent in self.tok_doc.ents:\n yield self.get_lemma_key(ent), len(ent)\n\n\n def link_noun_chunks (\n self,\n nodes: dict,\n *,\n debug: bool = False,\n ) -> typing.List[ NounChunk ]:\n \"\"\"\nLink any noun chunks which are not already subsumed by named entities.\n\n nodes:\ndictionary of `Node` objects in the graph\n\n debug:\ndebugging flag\n\n returns:\na list of identified noun chunks which are novel\n \"\"\"\n chunks: typing.List[ NounChunk ] = []\n\n # first pass: note the available noun chunks\n for sent_id, sent in enumerate(self.tok_doc.sents):\n for span in sent.noun_chunks:\n lemma_key: str = self.get_lemma_key(span)\n\n chunks.append(\n NounChunk(\n span,\n span.text,\n len(span),\n lemma_key,\n lemma_key not in nodes,\n sent_id,\n )\n )\n\n # second pass: remap span indices to the merged entities pipeline\n for i, span in enumerate(self.ner_doc.noun_chunks):\n if span.text == self.tokens[span.start].text:\n chunks[i].unseen = False\n elif chunks[i].unseen:\n chunks[i].start = span.start\n\n if debug:\n ic(chunks[i])\n\n 
return chunks\n\n\n ######################################################################\n ## relation extraction\n\n def iter_entity_pairs (\n self,\n pipe_graph: nx.MultiGraph,\n max_skip: int,\n *,\n debug: bool = True,\n ) -> typing.Iterator[ typing.Tuple[ Node, Node ]]:\n \"\"\"\nIterator for entity pairs for which the algorithm infers relations.\n\n pipe_graph:\na `networkx.MultiGraph` representation of the graph, reused for graph algorithms\n\n max_skip:\nmaximum distance between entities for inferred relations\n\n debug:\ndebugging flag\n\n yields:\npairs of entities within a range, e.g., to use for relation extraction\n \"\"\"\n ent_list: typing.List[ Node ] = [\n node\n for node in self.tokens\n if node.kind in [ NodeEnum.ENT ]\n ]\n\n for pair in itertools.product(ent_list, repeat = 2):\n if pair[0] != pair[1]:\n src: Node = pair[0]\n dst: Node = pair[1]\n\n try:\n path: typing.List[ int ] = nx.shortest_path(\n pipe_graph,\n source = src.node_id,\n target = dst.node_id,\n weight = \"weight\",\n method = \"dijkstra\",\n )\n\n if debug:\n ic(src.node_id, dst.node_id, path)\n\n if len(path) <= max_skip:\n yield ( src, dst, )\n except nx.NetworkXNoPath:\n pass\n except Exception as ex: # pylint: disable=W0718\n ic(ex)\n ic(\"ERROR\", src, dst)\n traceback.print_exc()"
},
{
"identifier": "PipelineFactory",
"path": "textgraphs/pipe.py",
"snippet": "class PipelineFactory: # pylint: disable=R0903\n \"\"\"\nFactory pattern for building a pipeline, which is one of the more\nexpensive operations with `spaCy`\n \"\"\"\n\n def __init__ ( # pylint: disable=W0102\n self,\n *,\n spacy_model: str = SPACY_MODEL,\n ner: typing.Optional[ Component ] = None,\n kg: KnowledgeGraph = KnowledgeGraph(), # pylint: disable=C0103\n infer_rels: typing.List[ InferRel ] = []\n ) -> None:\n \"\"\"\nConstructor which instantiates the `spaCy` pipelines:\n\n * `tok_pipe` -- regular generator for parsed tokens\n * `ner_pipe` -- with entities merged\n * `aux_pipe` -- spotlight entity linking\n\nwhich will be needed for parsing and entity linking.\n\n spacy_model:\nthe specific model to use in `spaCy` pipelines\n\n ner:\noptional custom NER component\n\n kg:\nknowledge graph used for entity linking\n\n infer_rels:\na list of components for inferring relations\n \"\"\"\n self.ner: typing.Optional[ Component ] = ner\n self.kg: KnowledgeGraph = kg # pylint: disable=C0103\n self.infer_rels: typing.List[ InferRel ] = infer_rels\n\n # determine the NER model to be used\n exclude: typing.List[ str ] = []\n\n if self.ner is not None:\n exclude.append(\"ner\")\n\n # build the pipelines\n # NB: `spaCy` team doesn't quite get the PEP 621 restrictions which PyPa mangled:\n # https://github.com/explosion/spaCy/issues/3536\n # https://github.com/explosion/spaCy/issues/4592#issuecomment-704373657\n if not spacy.util.is_package(spacy_model):\n spacy.cli.download(spacy_model)\n\n self.tok_pipe = spacy.load(\n spacy_model,\n exclude = exclude,\n )\n\n self.ner_pipe = spacy.load(\n spacy_model,\n exclude = exclude,\n )\n\n self.aux_pipe = spacy.load(\n spacy_model,\n exclude = exclude,\n )\n\n # add NER\n if self.ner is not None:\n self.ner.augment_pipe(self)\n\n # `aux_pipe` only: entity linking\n self.kg.augment_pipe(self)\n\n # `ner_pipe` only: merge entities\n self.ner_pipe.add_pipe(\n \"merge_entities\",\n )\n\n\n def create_pipeline (\n self,\n text_input: str,\n ) -> Pipeline:\n \"\"\"\nInstantiate the document pipelines needed to parse the input text.\n\n text_input:\nraw text to be parsed\n\n returns:\na configured `Pipeline` object\n \"\"\"\n pipe: Pipeline = Pipeline(\n text_input,\n self.tok_pipe,\n self.ner_pipe,\n self.aux_pipe,\n self.kg,\n self.infer_rels,\n )\n\n return pipe"
},
{
"identifier": "calc_quantile_bins",
"path": "textgraphs/util.py",
"snippet": "def calc_quantile_bins (\n num_rows: int\n ) -> np.ndarray:\n \"\"\"\nCalculate the bins to use for a quantile stripe,\nusing [`numpy.linspace`](https://numpy.org/doc/stable/reference/generated/numpy.linspace.html)\n\n num_rows:\nnumber of rows in the target dataframe\n\n returns:\ncalculated bins, as a `numpy.ndarray`\n \"\"\"\n granularity = max(round(math.log(num_rows) * 4), 1)\n\n return np.linspace(\n 0,\n 1,\n num = granularity,\n endpoint = True,\n )"
},
{
"identifier": "root_mean_square",
"path": "textgraphs/util.py",
"snippet": "def root_mean_square (\n values: typing.List[ float ]\n ) -> float:\n \"\"\"\nCalculate the [*root mean square*](https://mathworld.wolfram.com/Root-Mean-Square.html)\nof the values in the given list.\n\n values:\nlist of values to use in the RMS calculation\n\n returns:\nRMS metric as a float\n \"\"\"\n s: float = sum(map(lambda x: float(x)**2.0, values)) # pylint: disable=C0103\n n: float = float(len(values)) # pylint: disable=C0103\n\n return math.sqrt(s / n)"
},
{
"identifier": "stripe_column",
"path": "textgraphs/util.py",
"snippet": "def stripe_column (\n values: list,\n bins: int,\n ) -> np.ndarray:\n \"\"\"\nStripe a column in a dataframe, by interpolating quantiles into a set of discrete indexes.\n\n values:\nlist of values to stripe\n\n bins:\nquantile bins; see [`calc_quantile_bins()`](#calc_quantile_bins-function)\n\n returns:\nthe striped column values, as a `numpy.ndarray`\n \"\"\"\n s = pd.Series(values) # pylint: disable=C0103\n q = s.quantile(bins, interpolation = \"nearest\") # pylint: disable=C0103\n\n try:\n stripe = np.digitize(values, q) - 1\n return stripe\n except ValueError as ex:\n # should never happen?\n print(\"ValueError:\", str(ex), values, s, q, bins)\n raise"
},
{
"identifier": "RenderPyVis",
"path": "textgraphs/vis.py",
"snippet": "class RenderPyVis: # pylint: disable=R0903\n \"\"\"\nRender the _lemma graph_ as a `PyVis` network.\n \"\"\"\n HTML_HEIGHT_WITH_CONTROLS: int = 1200\n\n def __init__ (\n self,\n graph: SimpleGraph,\n kg: KnowledgeGraph, # pylint: disable=C0103\n ) -> None:\n \"\"\"\nConstructor.\n\n graph:\nsource graph to be visualized\n\n kg:\nknowledge graph used for entity linking\n \"\"\"\n self.graph: SimpleGraph = graph\n self.kg: KnowledgeGraph = kg #pylint: disable=C0103\n\n\n def render_lemma_graph (\n self,\n *,\n debug: bool = True,\n ) -> pyvis.network.Network:\n \"\"\"\nPrepare the structure of the `NetworkX` graph to use for building\nand returning a `PyVis` network to render.\n\nMake sure to call beforehand: `TextGraphs.calc_phrase_ranks()`\n\n debug:\ndebugging flag\n\n returns:\na `pyvis.network.Network` interactive visualization\n \"\"\"\n for node in self.graph.nodes.values():\n nx_node = self.graph.lemma_graph.nodes[node.node_id]\n nx_node[\"shape\"] = NODE_STYLES[node.kind].shape\n nx_node[\"color\"] = NODE_STYLES[node.kind].color\n\n if node.kind in [ NodeEnum.DEP ]:\n nx_node[\"label\"] = \"\"\n elif node.kind in [ NodeEnum.IRI ]:\n nx_node[\"title\"] = node.text\n nx_node[\"label\"] = self.kg.normalize_prefix(node.label) # type: ignore\n else:\n nx_node[\"label\"] = node.text\n\n if node.kind in [ NodeEnum.CHU, NodeEnum.IRI ]:\n nx_node[\"value\"] = 0.0\n\n if debug:\n ic(node.count, node, nx_node)\n\n # prepare the edge labels\n edge_labels: dict = {}\n\n for edge in self.graph.edges.values():\n edge_labels[(edge.src_node, edge.dst_node,)] = ( edge.kind, edge.rel, )\n\n # build the network\n pv_graph: pyvis.network.Network = pyvis.network.Network()\n pv_graph.from_nx(self.graph.lemma_graph)\n\n for pv_edge in pv_graph.get_edges():\n edge_key = ( pv_edge[\"from\"], pv_edge[\"to\"], )\n edge_info = edge_labels.get(edge_key)\n pv_edge[\"title\"] = edge_info[1] # type: ignore\n\n if edge_info[0] in [ RelEnum.DEP ]: # type: ignore\n pv_edge[\"arrows\"] = \"to\" # type: ignore\n pv_edge[\"color\"] = \"ltgray\" # type: ignore\n pv_edge[\"width\"] = 0 # type: ignore\n elif edge_info[0] in [ RelEnum.INF ]: # type: ignore\n pv_edge[\"arrows\"] = \"to\" # type: ignore\n pv_edge[\"color\"] = \"hsl(289, 17%, 49%)\" # type: ignore\n pv_edge[\"width\"] = 3 # type: ignore\n\n return pv_graph\n\n\n def draw_communities (\n self,\n *,\n spring_distance: float = 1.4,\n debug: bool = False,\n ) -> typing.Dict[ int, int ]:\n \"\"\"\nCluster the communities in the _lemma graph_, then draw a\n`NetworkX` graph of the notes with a specific color for each\ncommunity.\n\nMake sure to call beforehand: `TextGraphs.calc_phrase_ranks()`\n\n spring_distance:\n`NetworkX` parameter used to separate clusters visually\n\n debug:\ndebugging flag\n\n returns:\na map of the calculated communities\n \"\"\"\n # cluster the communities, using girvan-newman\n comm_iter: typing.Generator = nx.community.girvan_newman(\n self.graph.lemma_graph,\n )\n\n _ = next(comm_iter)\n next_level = next(comm_iter)\n communities: list = sorted(map(sorted, next_level))\n\n if debug:\n ic(communities)\n\n comm_map: typing.Dict[ int, int ] = {\n node_id: i\n for i, comm in enumerate(communities)\n for node_id in comm\n }\n\n # map from community => color\n xkcd_colors: typing.List[ str ] = list(mcolors.XKCD_COLORS.values())\n\n colors: typing.List[ str ] = [\n xkcd_colors[comm_map[n]]\n for n in list(self.graph.lemma_graph.nodes())\n ]\n\n # prep the labels\n labels: typing.Dict[ int, str ] = {\n node.node_id: 
self.kg.normalize_prefix(node.get_name())\n for node in self.graph.nodes.values()\n }\n\n # ¡dibuja, hombre!\n nx.draw_networkx(\n self.graph.lemma_graph,\n pos = nx.spring_layout(\n self.graph.lemma_graph,\n k = spring_distance / len(communities),\n ),\n labels = labels,\n node_color = colors,\n edge_color = \"#bbb\",\n with_labels = True,\n font_size = 9,\n )\n\n return comm_map\n\n\n def generate_wordcloud (\n self,\n *,\n background: str = \"black\",\n ) -> wordcloud.WordCloud:\n \"\"\"\nGenerate a tag cloud from the given phrases.\n\nMake sure to call beforehand: `TextGraphs.calc_phrase_ranks()`\n\n background:\nbackground color for the rendering\n\n returns:\nthe rendering as a `wordcloud.WordCloud` object, which can be used to generate PNG images, etc.\n \"\"\"\n terms: dict = {}\n max_weight: float = 0.0\n\n for node in self.graph.nodes.values():\n if node.weight > 0.0:\n phrase: str = node.text.replace(\" \", \"_\")\n max_weight = max(max_weight, node.weight)\n terms[phrase] = node.weight\n\n freq: dict = {\n phrase: round(weight / max_weight * 1000.0)\n for phrase, weight in terms.items()\n }\n\n cloud: wordcloud.WordCloud = wordcloud.WordCloud(\n background_color = background,\n )\n\n return cloud.generate_from_frequencies(freq)"
}
] | import asyncio
import logging
import os
import sys
import typing
import networkx as nx # pylint: disable=E0401
import numpy as np # pylint: disable=E0401
import pandas as pd # pylint: disable=E0401
import pulp # pylint: disable=E0401
import spacy # pylint: disable=E0401
import transformers # pylint: disable=E0401
import urllib3 # pylint: disable=E0401
from icecream import ic # pylint: disable=E0401
from .defaults import PAGERANK_ALPHA
from .elem import Edge, Node, NodeEnum, RelEnum
from .graph import SimpleGraph
from .pipe import Pipeline, PipelineFactory
from .util import calc_quantile_bins, root_mean_square, stripe_column
from .vis import RenderPyVis
from tqdm.notebook import tqdm # pylint: disable=E0401,W0611
from tqdm import tqdm # pylint: disable=E0401 | 8,139 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0302
"""
Implementation of an LLM-augmented `textgraph` algorithm for
constructing a _lemma graph_ from raw, unstructured text source.
The results provide elements for semi-automated construction or
augmentation of a _knowledge graph_.
This class maintains the state of a graph. Updates get applied by
running methods on `Pipeline` objects, typically per paragraph.
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## fix the borked libraries
# workaround: determine whether this is loading into a Jupyter
# notebook, to allow for `tqdm` progress bars
if "ipykernel" in sys.modules:
else:
# override: HF `transformers` and `tokenizers` have noisy logging
transformers.logging.set_verbosity_error()
os.environ["TOKENIZERS_PARALLELISM"] = "0"
# override: `OpenNRE` uses `word2vec` which has noisy logging
logging.disable(logging.INFO)
# override: WikidMedia and others allow their SSL certs to expire
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
######################################################################
## class definitions
class TextGraphs (SimpleGraph):
"""
Construct a _lemma graph_ from the unstructured text source,
then extract ranked phrases using a `textgraph` algorithm.
"""
def __init__ (
self,
*,
factory: typing.Optional[ PipelineFactory ] = None,
) -> None:
"""
Constructor.
factory:
optional `PipelineFactory` used to configure components
"""
super().__init__()
# initialize the pipeline factory
if factory is not None:
self.factory = factory
else:
self.factory = PipelineFactory()
def create_pipeline (
self,
text_input: str,
) -> Pipeline:
"""
Use the pipeline factory to create a pipeline (e.g., `spaCy.Document`)
for each text input, which are typically paragraph-length.
text_input:
raw text to be parsed by this pipeline
returns:
a configured pipeline
"""
return self.factory.create_pipeline(
text_input,
)
def create_render (
self
) -> RenderPyVis:
"""
Create an object for rendering the graph in `PyVis` HTML+JavaScript.
returns:
a configured `RenderPyVis` object for generating graph visualizations
"""
return RenderPyVis(
self,
self.factory.kg,
)
def _extract_phrases ( # pylint: disable=R0913
self,
pipe: Pipeline,
sent_id: int,
sent: spacy.tokens.span.Span,
text_id: int,
para_id: int,
lemma_iter: typing.Iterator[ typing.Tuple[ str, int ]],
*,
debug: bool = False,
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0302
"""
Implementation of an LLM-augmented `textgraph` algorithm for
constructing a _lemma graph_ from raw, unstructured text source.
The results provide elements for semi-automated construction or
augmentation of a _knowledge graph_.
This class maintains the state of a graph. Updates get applied by
running methods on `Pipeline` objects, typically per paragraph.
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## fix the borked libraries
# workaround: determine whether this is loading into a Jupyter
# notebook, to allow for `tqdm` progress bars
if "ipykernel" in sys.modules:
else:
# override: HF `transformers` and `tokenizers` have noisy logging
transformers.logging.set_verbosity_error()
os.environ["TOKENIZERS_PARALLELISM"] = "0"
# override: `OpenNRE` uses `word2vec` which has noisy logging
logging.disable(logging.INFO)
# override: WikidMedia and others allow their SSL certs to expire
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
######################################################################
## class definitions
class TextGraphs (SimpleGraph):
"""
Construct a _lemma graph_ from the unstructured text source,
then extract ranked phrases using a `textgraph` algorithm.
"""
def __init__ (
self,
*,
factory: typing.Optional[ PipelineFactory ] = None,
) -> None:
"""
Constructor.
factory:
optional `PipelineFactory` used to configure components
"""
super().__init__()
# initialize the pipeline factory
if factory is not None:
self.factory = factory
else:
self.factory = PipelineFactory()
def create_pipeline (
self,
text_input: str,
) -> Pipeline:
"""
Use the pipeline factory to create a pipeline (e.g., `spaCy.Document`)
for each text input, which are typically paragraph-length.
text_input:
raw text to be parsed by this pipeline
returns:
a configured pipeline
"""
return self.factory.create_pipeline(
text_input,
)
def create_render (
self
) -> RenderPyVis:
"""
Create an object for rendering the graph in `PyVis` HTML+JavaScript.
returns:
a configured `RenderPyVis` object for generating graph visualizations
"""
return RenderPyVis(
self,
self.factory.kg,
)
def _extract_phrases ( # pylint: disable=R0913
self,
pipe: Pipeline,
sent_id: int,
sent: spacy.tokens.span.Span,
text_id: int,
para_id: int,
lemma_iter: typing.Iterator[ typing.Tuple[ str, int ]],
*,
debug: bool = False, | ) -> typing.Iterator[ Node ]: | 2 | 2023-12-25 11:42:53+00:00 | 12k |
pkariz/grin-explorer | backend/api/views.py | [
{
"identifier": "fetch_and_store_block",
"path": "backend/api/bootstrap.py",
"snippet": "def fetch_and_store_block(blockchain, block_height, prefetch=True):\n # initialize node api\n node_api = NodeV2API(blockchain.node)\n if block_height < 0:\n # no such block height\n raise NodeBlockNotFoundException()\n if prefetch:\n block_data = get_prefetched_header_and_block_data(blockchain.node, block_height)\n else:\n block_data = node_api.get_block(height=block_height)\n header_data = block_data['header']\n timestamp = parse_datetime(header_data['timestamp'])\n hash = header_data['hash']\n # create header instance\n cuckoo_solution = ','.join(map(str, header_data['cuckoo_solution']))\n with transaction.atomic():\n header, header_created = BlockHeader.objects.get_or_create(\n blockchain=blockchain,\n cuckoo_solution=cuckoo_solution,\n kernel_root=header_data['kernel_root'],\n defaults={\n 'version': header_data['version'],\n 'output_root': header_data['output_root'],\n 'range_proof_root': header_data['range_proof_root'],\n 'kernel_mmr_size': header_data['kernel_mmr_size'],\n 'output_mmr_size': header_data['output_mmr_size'],\n 'nonce': str(header_data['nonce']),\n 'edge_bits': header_data['edge_bits'],\n 'secondary_scaling': header_data['secondary_scaling'],\n 'total_difficulty': header_data['total_difficulty'],\n 'total_kernel_offset': header_data['total_kernel_offset'],\n }\n )\n # create block instance\n try:\n block, block_created = Block.objects.get_or_create(\n blockchain=blockchain,\n hash=hash,\n height=block_height,\n timestamp=timestamp,\n header=header,\n prev_hash=block_data['header']['previous'],\n reorg=None,\n nr_inputs=len(block_data['inputs']),\n nr_outputs=len(block_data['outputs']),\n nr_kernels=len(block_data['kernels']),\n )\n except IntegrityError as e:\n # race condition so it's a duplicate. We can skip creation process\n # and just return the block that we already have\n return Block.objects.get(blockchain=blockchain, hash=hash)\n\n if not block_created:\n # we have already fetched all the data since it's done in an atomic\n # transaction, so skip unnecessary work\n return block\n\n # bulk create kernels\n kernels = []\n for kernel_data in block_data['kernels']:\n kernels.append(\n Kernel(\n block=block,\n features=kernel_data['features'],\n fee=kernel_data['fee'],\n fee_shift=kernel_data['fee_shift'],\n lock_height=kernel_data['lock_height'],\n excess=kernel_data['excess'],\n excess_sig=kernel_data['excess_sig'],\n )\n )\n Kernel.objects.bulk_create(kernels)\n\n inputs = []\n # create input instances\n outputs_data = Output.objects\\\n .filter(\n commitment__in=block_data['inputs'],\n block__reorg__isnull=True,\n block__blockchain=block.blockchain,\n )\\\n .values('id', 'commitment')\n outputs_mapper = { output_data['commitment'] : output_data['id'] for output_data in outputs_data }\n for input_data in block_data['inputs']:\n inputs.append(\n Input(\n block=block,\n commitment=input_data,\n output_id=outputs_mapper.get(input_data),\n )\n )\n Input.objects.bulk_create(inputs)\n # mark the corresponding outputs as spent, but only on the main chain so\n # that we don't corrupt the reorged data\n Output.objects.filter(pk__in=outputs_mapper.values()).update(spent=True)\n\n # create output instances\n outputs = []\n inputs = Input.objects\\\n .filter(\n commitment__in=list(map(lambda x: x['commit'], block_data['outputs'])),\n block__reorg__isnull=True,\n block__blockchain=block.blockchain,\n )\n inputs_mapper = { input.commitment : input for input in inputs }\n for output_data in block_data['outputs']:\n outputs.append(\n Output(\n block=block,\n 
output_type=output_data['output_type'],\n commitment=output_data['commit'],\n spent=output_data['spent'],\n proof=output_data['proof'],\n proof_hash=output_data['proof_hash'],\n merkle_proof=output_data['merkle_proof'],\n mmr_index=output_data['mmr_index'],\n )\n )\n outputs = Output.objects.bulk_create(outputs)\n # link inputs to created outputs, but only on the main chain so that we\n # don't corrupt the reorged data\n fixed_inputs = []\n for output in outputs:\n matching_input = inputs_mapper.get(output.commitment)\n if matching_input:\n matching_input.output = output\n fixed_inputs.append(matching_input)\n Input.objects.bulk_update(fixed_inputs, ['output'])\n return block"
},
{
"identifier": "update_blockchain_progress",
"path": "backend/api/bootstrap.py",
"snippet": "def update_blockchain_progress(blockchain):\n try:\n start_height, end_height = blockchain.get_bootstrap_heights()\n except Exception as e:\n logger.warning(\n 'Failed to get bootstrap heights',\n extra={ 'blockchain': blockchain.slug },\n )\n raise UpdateBlockchainProgressError(blockchain.slug)\n expected_heights = set(range(start_height, end_height + 1))\n existing_heights = set(list(\n blockchain.blocks\\\n .filter(reorg__isnull=True)\\\n .values_list('height', flat=True)\n ))\n missing_heights = expected_heights - existing_heights\n update_load_progress(\n blockchain, \n len(missing_heights),\n end_height - start_height + 1,\n 1,\n 1,\n 2,\n verbose=True\n )"
},
{
"identifier": "UpdateBlockchainProgressError",
"path": "backend/api/exceptions.py",
"snippet": "class UpdateBlockchainProgressError(Exception):\n pass"
},
{
"identifier": "get_filter_backends",
"path": "backend/api/helpers.py",
"snippet": "def get_filter_backends(replacements):\n \"\"\"\n Returns a tuple of filter backends where default ones, from DefaultMixin,\n are replaced with the given replacements.\n\n Args:\n replacements: dict where key is an existing filter backend class's\n __name__ and value is its replacement filter backend class\n \"\"\"\n current_filters = DefaultMixin.filter_backends\n return tuple([\n filter if filter.__name__ not in replacements else replacements[filter.__name__]\n for filter in list(current_filters)\n ])"
},
{
"identifier": "load_data_from_redis",
"path": "backend/api/helpers.py",
"snippet": "def load_data_from_redis(redis_key):\n r = redis.Redis(host='redis')\n data = r.get(redis_key)\n if data is None:\n return\n return json.loads(data)"
},
{
"identifier": "BlockFilter",
"path": "backend/api/filters.py",
"snippet": "class BlockFilter(filters.FilterSet):\n class Meta:\n model = Block\n fields = ('blockchain', 'height', 'hash')"
},
{
"identifier": "CustomBlockSearchFilter",
"path": "backend/api/filters.py",
"snippet": "class CustomBlockSearchFilter(DRFfilters.SearchFilter):\n \"\"\"\n Alongside the given search_fields this filter filters also by:\n - keyword 'reorgs' --> return only blocks where reorgs happened\n - ['inputs', 'outputs', 'kernels'] ['=', '<', '>', '<=', '>='] [value] -->\n return only blocks matching this computation, eg: 'inputs > 2'\n You cannot combine different types of search (eg. 'reorgs' + 'computation')\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n queryset = super().filter_queryset(request, queryset, view)\n blockchain_slug = view.kwargs['blockchain_slug']\n original_search_terms = self.get_search_terms(request)\n search_terms = self._get_normalized_search_terms(original_search_terms)\n if len(search_terms) == 0:\n # searches:\n # - height --> add filter reorg=None\n # - hash --> nothing to add\n # - outputhash --> add filter reorg=None\n # - block-detail --> nothing to add\n # - block-list --> add filter reorg=None\n if len(original_search_terms) > 1:\n raise APIException('Too many standard search terms')\n if not original_search_terms:\n # it's either an unfiltered block-list or block-detail\n if view.action == 'list':\n queryset = queryset.filter(reorg=None)\n else:\n # there's only 1 original search term, figure out which one\n if len(original_search_terms[0]) != 64:\n # it's not block hash but either block height or output hash\n # in both cases we need to filter out reorgs\n queryset = queryset.filter(reorg=None)\n return queryset\n searched_types = set(map(lambda x: x['type'], search_terms))\n if len(searched_types) > 1:\n raise APIException('Cannot combine different types of searches')\n if searched_types == { 'reorgs' }:\n return self._get_reorgs_qs(blockchain_slug)\n elif searched_types == { 'computation' }:\n return self._get_computations_qs(search_terms, blockchain_slug)\n elif searched_types == { 'hash' }:\n return self._get_hash_qs(search_terms[0]['value'], blockchain_slug, queryset)\n elif searched_types == { 'height' }:\n return self._get_height_qs(search_terms[0]['value'], blockchain_slug)\n elif searched_types == { 'kernel_or_output' }:\n return self._get_kernel_or_output_qs(\n search_terms[0]['value'], blockchain_slug)\n else:\n logger.exception(\n 'Invalid search terms',\n exc_info=e,\n extra={'search_terms': search_terms}\n )\n raise APIException('Invalid search terms')\n\n def _get_normalized_search_terms(self, search_terms):\n \"\"\"\n Search terms of format ['outputs>1'] are not supported. Instead, the\n operators should be surrounded by spaces, eg. 
['outputs', '>', '1'].\n Supported operators are ['=', '>', '<', '<=', '>=']\n \"\"\"\n supported_operators = ['=', '>', '<', '<=', '>=']\n normalized_terms = []\n i = 0\n while i <= len(search_terms) - 1:\n if isinstance(search_terms[i], str) and search_terms[i].lower() in ['inputs', 'outputs', 'kernels']:\n operator = search_terms[i+1]\n if operator not in supported_operators:\n raise APIException('Invalid search operator')\n value = int(search_terms[i+2])\n if value < 0:\n raise APIException('Invalid search computation')\n normalized_terms.append({\n 'type': 'computation',\n 'source': search_terms[i],\n 'op': operator,\n 'value': value,\n })\n i += 3\n elif isinstance(search_terms[i], str) and search_terms[i].lower() == 'reorgs':\n normalized_terms.append({ 'type': 'reorgs' })\n i += 1\n elif len(search_terms[i]) == 64:\n # hash\n normalized_terms.append({\n 'type': 'hash',\n 'value': search_terms[i],\n })\n i += 1\n elif len(search_terms[i]) == 66:\n # kernel excess or output commitment\n normalized_terms.append({\n 'type': 'kernel_or_output',\n 'value': search_terms[i],\n })\n i += 1\n else:\n try:\n value = int(search_terms[i])\n except ValueError:\n value = None\n if value >= 0:\n normalized_terms.append({\n 'type': 'height',\n 'value': value,\n })\n i += 1\n else:\n # term which is not for this custom search, eg. block hash\n i += 1\n return normalized_terms\n\n def _get_reorgs_qs(self, blockchain_slug):\n # NOTE: we first filter, then calculate reorg_len on filtered data and\n # then filter on annotated data that we've calculated\n reorg_heights = list(Reorg.objects\\\n .select_related('start_main_block')\\\n .filter(\n blockchain__slug=blockchain_slug,\n start_main_block__reorg=None,\n )\\\n .annotate(reorg_len=F('end_reorg_block__height') - F('start_reorg_block__height') + 1)\\\n .filter(reorg_len__gte=settings.MIN_REORG_LEN)\\\n .values_list('start_main_block__height', flat=True)\n )\n queryset = Block.objects\\\n .filter(\n blockchain__slug=blockchain_slug,\n reorg=None,\n height__in=reorg_heights,\n )\\\n .order_by('-height')\n return queryset\n\n def _get_hash_qs(self, hash, blockchain_slug, queryset):\n return queryset.filter(\n blockchain__slug=blockchain_slug,\n hash=hash,\n )\n\n def _get_height_qs(self, height, blockchain_slug):\n return Block.objects.filter(\n blockchain__slug=blockchain_slug,\n height=height,\n )\n\n def _get_kernel_or_output_qs(self, kernel_or_output, blockchain_slug):\n kernel = Kernel.objects.filter(\n excess=kernel_or_output,\n block__blockchain__slug=blockchain_slug,\n ).first()\n if kernel:\n return Block.objects.filter(hash=kernel.block.hash)\n output = Output.objects.filter(\n commitment=kernel_or_output,\n block__blockchain__slug=blockchain_slug,\n ).first()\n if output:\n return Block.objects.filter(hash=output.block.hash)\n return Block.objects.none()\n\n def _get_computations_qs(self, search_terms, blockchain_slug):\n operator_mapping = {\n '=': '',\n '>': '__gt',\n '<': '__lt',\n '<=': '__lte',\n '>=': '__gte',\n }\n possible_sources = ['inputs', 'outputs', 'kernels']\n searched_sources = set(map(lambda x: x['source'], search_terms))\n op_searched_types = set(possible_sources) & set(searched_sources)\n op_qs = Blockchain.objects.get(slug=blockchain_slug).blocks.all()\n for search_term in search_terms:\n filters = {\n 'blockchain__slug': blockchain_slug,\n 'reorg': None,\n }\n op_map = operator_mapping[search_term['op']]\n filters[f'nr_{search_term[\"source\"]}{op_map}'] = search_term['value']\n op_qs = 
op_qs.filter(**filters).order_by('-height')\n return op_qs"
},
{
"identifier": "NodeFilter",
"path": "backend/api/filters.py",
"snippet": "class NodeFilter(filters.FilterSet):\n class Meta:\n model = Node\n fields = ('name', 'slug', 'archive')"
},
{
"identifier": "NodeGroupFilter",
"path": "backend/api/filters.py",
"snippet": "class NodeGroupFilter(filters.FilterSet):\n class Meta:\n model = NodeGroup\n fields = ('name', 'slug')"
},
{
"identifier": "CustomModelViewSet",
"path": "backend/api/mixins.py",
"snippet": "class CustomModelViewSet(\n DefaultMixin,\n viewsets.ModelViewSet\n):\n \"\"\"Default viewset for models.\"\"\"\n pass"
},
{
"identifier": "Blockchain",
"path": "backend/api/models.py",
"snippet": "class Blockchain(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n # testnet, mainnet etc\n name = models.CharField(max_length=255, unique=True)\n # slug of the name, we use it in url\n slug = models.SlugField(max_length=255, unique=True)\n # node from which the data is fetched\n node = models.ForeignKey(\n Node, related_name='blockchains', on_delete=models.PROTECT)\n # the default blockchain will be picked on the gui by default\n default = models.BooleanField(default=False)\n # if fetch_price is False then the shown price will always be 0.\n # Testnets and localnets should have this set to false.\n fetch_price = models.BooleanField(default=True)\n # load_progress shows current % of loaded blocks. If archive is True then\n # load_progress will represent % of missing all blocks, otherwise % of\n # missing blocks from the latest 1440 blocks\n load_progress = models.DecimalField(\n max_digits=5,\n decimal_places=2,\n default=0.0,\n validators=[MinValueValidator(0), MaxValueValidator(100)]\n )\n\n def __str__(self):\n return f'{self.name} - {self.load_progress} [Node<{self.node}>]'\n\n def bootstrap(self, skip_reorg_check=False):\n # import here to avoid cyclic import\n from .bootstrap import load_blocks\n\n start_height, end_height = self.get_bootstrap_heights()\n load_blocks(self, start_height, end_height, skip_reorg_check)\n\n def get_tip_height(self):\n node_api = NodeV2API(self.node)\n try:\n end_block = node_api.get_tip()['height']\n except NodeError as e:\n logger.exception('Bootstrap failed - failed to get node tip')\n raise e\n return end_block\n\n def get_progress_decimal_places(self):\n if self.node.archive:\n return 2\n return 0\n\n def get_bootstrap_heights(self):\n node_api = NodeV2API(self.node)\n end_height = self.get_tip_height()\n try:\n start_height = node_api.get_blocks(0, end_height, 1, False)['blocks'][0]['header']['height']\n except IndexError:\n raise Exception('Node has no blocks.')\n except NodeError as e:\n logger.exception('Bootstrap failed - failed to get first block height')\n raise e\n return start_height, end_height\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n if self.default:\n # set other blockchain.default to False\n other_blockchains = Blockchain.objects.all()\n if self.pk:\n other_blockchains = other_blockchains.exclude(pk=self.pk)\n other_blockchains.update(default=False)\n # blockchain doesn't change much so this call doesn't hurt\n old_instance = Blockchain.objects.get(pk=self.pk) if self.pk else None\n res = super().save(*args, **kwargs)\n if old_instance and self.load_progress != old_instance.load_progress:\n # load progress changed, send info\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 'blockchain_progress_changed',\n 'message': {\n 'slug': self.slug,\n # convert to float since Decimal is not serializable\n 'load_progress': float(self.load_progress),\n },\n }\n )\n return res\n\n def full_print(self):\n \"\"\"Used for developing and debugging.\"\"\"\n print('MAIN CHAIN:')\n for block in self.blocks.filter(reorg=None).order_by('height'):\n print(' --> ' + block.hash)\n for reorg in Reorg.objects.filter(blockchain=self):\n print('REORG:')\n for block in Block.objects.filter(reorg=reorg).order_by('height'):\n print(' --> ' + block.hash)\n print('------------------------------------------------------')\n\n def reset(self):\n \"\"\"Used for developing and debugging.\"\"\"\n from .models import 
Block, BlockHeader, Input, Output, Kernel, DramatiqTask, Reorg\n from django.contrib.contenttypes.models import ContentType\n from decimal import Decimal\n\n Input.objects.filter(block__blockchain=self).delete()\n Output.objects.filter(block__blockchain=self).delete()\n Kernel.objects.filter(block__blockchain=self).delete()\n self.reorgs.all().delete()\n\n content_type = ContentType.objects.get_for_model(self)\n DramatiqTask.objects.filter(\n content_type=content_type,\n object_id=self.id,\n ).delete()\n # removing header will also remove the block\n BlockHeader.objects.filter(block__blockchain=self).delete()\n self.load_progress = Decimal('0')\n self.save()"
},
{
"identifier": "Block",
"path": "backend/api/models.py",
"snippet": "class Block(TimeStampedModel):\n blockchain = models.ForeignKey(\n Blockchain, related_name='blocks', on_delete=models.CASCADE)\n hash = models.CharField(\n primary_key=True,\n max_length=64,\n validators=[MinLengthValidator(64)],\n db_index=True,\n )\n height = models.PositiveIntegerField(db_index=True)\n timestamp = models.DateTimeField(db_index=True)\n header = models.ForeignKey(\n 'BlockHeader', related_name='block', on_delete=models.CASCADE)\n prev_hash = models.CharField(\n max_length=64,\n null=True,\n blank=True,\n validators=[MinLengthValidator(64)],\n )\n nr_inputs = models.PositiveIntegerField(default=0)\n nr_outputs = models.PositiveIntegerField(default=0)\n nr_kernels = models.PositiveIntegerField(default=0)\n # when reorg is set it means this block is part of a reorg and not the main\n # chain\n reorg = models.ForeignKey(\n 'Reorg', null=True, related_name='blocks', on_delete=models.CASCADE)\n\n def __str__(self):\n suffix = ''\n if self.reorg:\n suffix = ' Reorged: {}'.format(self.reorg.id)\n return '{}: {} (prev: {})'.format(\n self.height, self.hash, self.prev_hash)\n\n def get_next_block(self):\n return Block.objects.filter(prev_hash=self.hash).first()\n\n def get_previous_block(self):\n return Block.objects.filter(hash=self.prev_hash).first()\n\n def full_print(self, prefix=''):\n \"\"\"Used for developing and debugging.\"\"\"\n print('---------------------------------------------------------------')\n print(f'{prefix}Block {self.height}: {self.hash}, reorg: {self.reorg}')\n print(f'{prefix} INPUTS:')\n for input in self.inputs.all():\n print(f'{prefix} {input}, output: {input.output}')\n print(f'{prefix} OUTPUTS:')\n for output in self.outputs.all():\n print(f'{prefix} {output}')\n print(f'{prefix} KERNELS:')\n for kernel in self.kernels.all():\n print(f'{prefix} {kernel}')\n print('---------------------------------------------------------------')"
},
{
"identifier": "Reorg",
"path": "backend/api/models.py",
"snippet": "class Reorg(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n blockchain = models.ForeignKey(\n Blockchain, related_name='reorgs', on_delete=models.CASCADE)\n # start_reorg_block and end_reorg_block define starting and ending block,\n # which were reorged\n start_reorg_block = models.ForeignKey(\n Block, related_name='start_reorgs', on_delete=models.CASCADE)\n end_reorg_block = models.ForeignKey(\n Block, related_name='end_reorgs', on_delete=models.CASCADE)\n # start_main_block defines starting block which is the new start of the main\n # chain - the block that replaced start_reorg_block. We usually don't know\n # which the ending block is when we spot the reorg, so we don't store it\n # (we don't even have it in DB at that time yet since we usually get them\n # incrementally in the order they're accepted).\n start_main_block = models.ForeignKey(\n Block, related_name='start_mains', on_delete=models.CASCADE)\n\n def __str__(self):\n return '{}: start: {}, end: {}'.format(\n self.blockchain.slug, self.start_reorg_block, self.end_reorg_block)"
},
{
"identifier": "Node",
"path": "backend/api/models.py",
"snippet": "class Node(TimeStampedModel):\n \"\"\"Node on the network. Currently it only supports grin-rust.\"\"\"\n id = models.BigAutoField(primary_key=True)\n # name can be whatever\n name = models.CharField(max_length=255, unique=True)\n # by default that's slug of the name\n slug = models.SlugField(max_length=255, unique=True)\n group = models.ForeignKey(\n NodeGroup, related_name='nodes', on_delete=models.PROTECT)\n # foreign api url of the grin-rust node\n api_url = models.URLField()\n # username of the grin-rust node\n api_username = models.CharField(max_length=255)\n # foreign api secret of the grin-rust node\n api_password = models.CharField(max_length=255)\n # if archive is true then we fetch every block when we bootstrap, otherwise\n # we fetch only latest 1440 blocks (1 day)\n archive = models.BooleanField(default=False)\n\n def __str__(self):\n repr = f'{self.name}'\n if self.archive:\n repr += ' (archive)'\n return repr\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n return super().save(*args, **kwargs)\n\n def is_reachable(self):\n try:\n NodeV2API(self).get_tip()\n return True\n except (\n RequestsConnectionError,\n RequestsTimeout,\n RequestsHTTPError,\n RequestsReadTimeout\n ):\n logger.exception('Node unreachable', extra={'node': self.slug})\n return False"
},
{
"identifier": "NodeGroup",
"path": "backend/api/models.py",
"snippet": "class NodeGroup(models.Model):\n \"\"\"\n NodeGroup represents a group of nodes. These nodes should be on the same\n network.:\n \"\"\"\n id = models.BigAutoField(primary_key=True)\n # name is probably mainnet, testnet or smth similar\n name = models.CharField(max_length=255, unique=True)\n # by default that's slug of the name\n slug = models.SlugField(max_length=255, unique=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n self.full_clean()\n return super().save(*args, **kwargs)"
},
{
"identifier": "DramatiqTask",
"path": "backend/api/models.py",
"snippet": "class DramatiqTask(TimeStampedModel):\n \"\"\"We store task's message_id so that we can abort the task.\"\"\"\n\n class Type(models.TextChoices):\n BOOTSTRAP = 'bootstrap', 'Bootstrap'\n BLOCKCHAIN_DELETE = 'blockchain_delete', 'Blockchain delete'\n\n class Status(models.TextChoices):\n # NOTE: IN_PROGRESS doesn't really mean it's already in progress, just\n # that it has been sent\n IN_PROGRESS = 'in_progress', 'In progress'\n SKIPPED = 'skipped', 'Skipped'\n SUCCESS = 'success', 'Success'\n FAILURE = 'failure', 'Failure'\n\n id = models.BigAutoField(primary_key=True)\n message_id = models.CharField(max_length=255, unique=True)\n # type tells us what this task is doing, eg. 'bootstrap'\n type = models.CharField(max_length=255, choices=Type.choices)\n status = models.CharField(max_length=255, choices=Status.choices)\n # failure_reason should be short and concise\n failure_reason = models.TextField(null=True, default=None)\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n def save(self, *args, **kwargs):\n from .serializers import DramatiqTaskSerializer\n old_instance = DramatiqTask.objects.get(pk=self.pk) if self.pk else None\n res = super().save(*args, **kwargs)\n if old_instance and self.status != old_instance.status:\n # status changed, send info\n print('sending task status update')\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 'task_status_changed',\n 'message': DramatiqTaskSerializer(self).data,\n }\n )\n return res"
},
{
"identifier": "BlockchainSerializer",
"path": "backend/api/serializers.py",
"snippet": "class BlockchainSerializer(serializers.ModelSerializer):\n node = serializers.PrimaryKeyRelatedField(queryset=Node.objects.all(), write_only=True)\n\n class Meta:\n model = Blockchain\n fields = ('name', 'slug', 'default', 'node', 'load_progress', 'fetch_price')"
},
{
"identifier": "BlockchainExtendedSerializer",
"path": "backend/api/serializers.py",
"snippet": "class BlockchainExtendedSerializer(serializers.ModelSerializer):\n tasks = serializers.SerializerMethodField()\n\n class Meta:\n model = Blockchain\n fields = ('name', 'slug', 'node', 'default', 'load_progress', 'fetch_price', 'tasks')\n\n def to_representation(self, obj):\n self.fields['node'] = NodeSerializer()\n return super().to_representation(obj)\n\n def get_tasks(self, blockchain):\n content_type = ContentType.objects.get_for_model(blockchain)\n tasks = DramatiqTask.objects.filter(\n content_type=content_type,\n object_id=blockchain.id,\n )\n return DramatiqTaskSimpleSerializer(tasks, many=True).data"
},
{
"identifier": "BlockSerializer",
"path": "backend/api/serializers.py",
"snippet": "class BlockSerializer(serializers.ModelSerializer):\n blockchain = BlockchainSerializer()\n header = BlockHeaderSerializer()\n starting_reorg_blocks = serializers.SerializerMethodField()\n\n class Meta:\n model = Block\n fields = (\n 'hash',\n 'height',\n 'timestamp',\n 'header',\n 'prev_hash',\n 'reorg',\n 'nr_kernels',\n 'nr_inputs',\n 'nr_outputs',\n 'blockchain',\n 'starting_reorg_blocks',\n )\n\n def get_starting_reorg_blocks(self, block):\n reorgs = Reorg.objects.filter(start_main_block=block)\n reorgs = list(filter(\n lambda reorg: reorg.end_reorg_block.height - \\\n reorg.start_reorg_block.height + 1 >= settings.MIN_REORG_LEN,\n reorgs\n ))\n return BlockSerializer(\n [reorg.start_reorg_block for reorg in reorgs], many=True).data"
},
{
"identifier": "BlockDetailSerializer",
"path": "backend/api/serializers.py",
"snippet": "class BlockDetailSerializer(serializers.ModelSerializer):\n header = BlockHeaderSerializer()\n kernels = KernelSerializer(many=True)\n inputs = InputSerializer(many=True)\n outputs = OutputSerializer(many=True)\n blockchain = BlockchainSerializer()\n confirmations = serializers.SerializerMethodField()\n next_hash = serializers.SerializerMethodField()\n next_block_reorgs = serializers.SerializerMethodField()\n\n class Meta:\n model = Block\n fields = (\n 'hash',\n 'height',\n 'timestamp',\n 'header',\n 'prev_hash',\n 'kernels',\n 'inputs',\n 'outputs',\n 'blockchain',\n 'confirmations',\n 'next_hash',\n 'reorg',\n 'next_block_reorgs',\n )\n\n def get_confirmations(self, block):\n # in reorged blocks we show confirmations based on the reorged chain!\n tip_height = block.blockchain.blocks\\\n .filter(reorg=block.reorg)\\\n .order_by('-height')\\\n .first().height\n return tip_height - block.height + 1\n\n def get_next_hash(self, block):\n try:\n return Block.objects.get(\n blockchain=block.blockchain,\n reorg=block.reorg,\n prev_hash=block.hash\n ).hash\n except Block.DoesNotExist:\n return None\n\n def get_next_block_reorgs(self, block):\n from .serializers import ReorgSerializer\n reorgs = Reorg.objects.filter(start_main_block__prev_hash=block.hash)\n reorgs = list(filter(\n lambda reorg: reorg.end_reorg_block.height - \\\n reorg.start_reorg_block.height + 1 >= settings.MIN_REORG_LEN,\n reorgs\n ))\n return ReorgSerializer(reorgs, many=True).data"
},
{
"identifier": "NodeSerializer",
"path": "backend/api/serializers.py",
"snippet": "class NodeSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Node\n fields = '__all__'"
},
{
"identifier": "NodeGroupSerializer",
"path": "backend/api/serializers.py",
"snippet": "class NodeGroupSerializer(serializers.ModelSerializer):\n nodes = NodeSerializer(many=True, read_only=True)\n\n class Meta:\n model = NodeGroup\n fields = '__all__'"
},
{
"identifier": "DramatiqTaskSerializer",
"path": "backend/api/serializers.py",
"snippet": "class DramatiqTaskSerializer(serializers.ModelSerializer):\n content_object = serializers.SerializerMethodField()\n\n class Meta:\n model = DramatiqTask\n fields = (\n 'id',\n 'message_id',\n 'type',\n 'status',\n 'failure_reason',\n 'content_object',\n )\n\n def get_content_object(self, task):\n from .serializers import BlockchainSerializer\n serializer_mapper = {\n 'Blockchain': BlockchainSerializer,\n }\n klass = task.content_object.__class__\n return {\n 'model': klass._meta.model_name,\n 'data': serializer_mapper[klass.__name__](task.content_object).data,\n }"
},
{
"identifier": "bootstrap_blockchain",
"path": "backend/api/tasks.py",
"snippet": "@dramatiq.actor(max_retries=0, time_limit=float(\"inf\"))\ndef bootstrap_blockchain(blockchain_slug):\n # import here to avoid cyclic import\n from .models import Blockchain\n Blockchain.objects.get(slug=blockchain_slug).bootstrap()"
},
{
"identifier": "delete_blockchain",
"path": "backend/api/tasks.py",
"snippet": "@dramatiq.actor(max_retries=0, time_limit=float(\"inf\"))\ndef delete_blockchain(blockchain_slug):\n # import here to avoid cyclic import\n from .models import Blockchain\n Blockchain.objects.get(slug=blockchain_slug).delete()\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 'blockchain_deleted',\n 'message': {\n 'slug': blockchain_slug,\n },\n }\n )"
}
] | from asgiref.sync import async_to_sync
from django.contrib.contenttypes.models import ContentType
from django.db.models.deletion import ProtectedError
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from dramatiq_abort import abort
from rest_framework import status
from rest_framework.exceptions import APIException
from rest_framework.exceptions import NotFound
from rest_framework.exceptions import PermissionDenied as DRFPermissionDenied
from rest_framework.exceptions import ValidationError as DRFValidationError
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from slugify import slugify
from .bootstrap import fetch_and_store_block, update_blockchain_progress
from .exceptions import UpdateBlockchainProgressError
from .helpers import get_filter_backends, load_data_from_redis
from .filters import (
BlockFilter,
CustomBlockSearchFilter,
NodeFilter,
NodeGroupFilter,
)
from .mixins import CustomModelViewSet
from .models import Blockchain, Block, Reorg, Node, NodeGroup, DramatiqTask
from .serializers import (
BlockchainSerializer,
BlockchainExtendedSerializer,
BlockSerializer,
BlockDetailSerializer,
NodeSerializer,
NodeGroupSerializer,
DramatiqTaskSerializer,
)
from .tasks import bootstrap_blockchain, delete_blockchain
import channels
import logging
import pytz | 8,844 |
logger = logging.getLogger(__name__)
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))
class NodeGroupViewSet(CustomModelViewSet):
"""API endpoint for NodeGroup."""
queryset = NodeGroup.objects.all()
filterset_class = NodeGroupFilter
serializer_class = NodeGroupSerializer
lookup_field = 'slug'
permission_classes = [IsAuthenticated]
def create(self, request, *args, **kwargs):
slug = request.data.get('slug')
if not slug:
request.data['slug'] = slugify(request.data['name'], to_lower=True)
return super().create(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
try:
return super().destroy(request, *args, **kwargs)
except ProtectedError as e:
raise DRFValidationError(
detail='Node group is related to nodes, delete them first')
class NodeViewSet(CustomModelViewSet):
"""API endpoint for Node."""
queryset = Node.objects.all()
filterset_class = NodeFilter
serializer_class = NodeSerializer
# currently all node views require authentication
permission_classes = [IsAuthenticated]
lookup_field = 'slug'
def create(self, request, *args, **kwargs):
slug = request.data.get('slug')
if not slug:
request.data['slug'] = slugify(request.data['name'], to_lower=True)
request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
# NOTE: super().partial_update calls update(..., partial=True)
if not kwargs.get('partial'):
# we don't allow full updates - aka PUT
raise DRFPermissionDenied()
return super().update(request, *args, **kwargs)
def partial_update(self, request, slug=None):
request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk
return super().partial_update(request, slug=slug)
@action(detail=True, methods=['get'])
def reachable(self, request, slug=None):
node = self.get_object()
try:
res = node.is_reachable()
except Exception as e:
logger.exception('Unreachable node')
res = False
return Response(res, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
try:
return super().destroy(request, *args, **kwargs)
except ProtectedError as e:
raise DRFValidationError(
detail='Node is related to blockchains, delete them first')
class BlockchainViewSet(CustomModelViewSet):
"""API endpoint for Blockchain."""
|
logger = logging.getLogger(__name__)
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))
class NodeGroupViewSet(CustomModelViewSet):
"""API endpoint for NodeGroup."""
queryset = NodeGroup.objects.all()
filterset_class = NodeGroupFilter
serializer_class = NodeGroupSerializer
lookup_field = 'slug'
permission_classes = [IsAuthenticated]
def create(self, request, *args, **kwargs):
slug = request.data.get('slug')
if not slug:
request.data['slug'] = slugify(request.data['name'], to_lower=True)
return super().create(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
try:
return super().destroy(request, *args, **kwargs)
except ProtectedError as e:
raise DRFValidationError(
detail='Node group is related to nodes, delete them first')
class NodeViewSet(CustomModelViewSet):
"""API endpoint for Node."""
queryset = Node.objects.all()
filterset_class = NodeFilter
serializer_class = NodeSerializer
# currently all node views require authentication
permission_classes = [IsAuthenticated]
lookup_field = 'slug'
def create(self, request, *args, **kwargs):
slug = request.data.get('slug')
if not slug:
request.data['slug'] = slugify(request.data['name'], to_lower=True)
request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
# NOTE: super().partial_update calls update(..., partial=True)
if not kwargs.get('partial'):
# we don't allow full updates - aka PUT
raise DRFPermissionDenied()
return super().update(request, *args, **kwargs)
def partial_update(self, request, slug=None):
request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk
return super().partial_update(request, slug=slug)
@action(detail=True, methods=['get'])
def reachable(self, request, slug=None):
node = self.get_object()
try:
res = node.is_reachable()
except Exception as e:
logger.exception('Unreachable node')
res = False
return Response(res, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
try:
return super().destroy(request, *args, **kwargs)
except ProtectedError as e:
raise DRFValidationError(
detail='Node is related to blockchains, delete them first')
class BlockchainViewSet(CustomModelViewSet):
"""API endpoint for Blockchain.""" | queryset = Blockchain.objects.all() | 10 | 2023-12-24 22:15:11+00:00 | 12k |
datrocity/pond | pond/activity.py | [
{
"identifier": "Artifact",
"path": "pond/artifact/artifact.py",
"snippet": "class Artifact(ABC):\n \"\"\" Knows how to read and write one type of artifact.\n\n Concrete Artifact implementation should save the metadata with the data if possible,\n so that the artifact is self-contained even if, for instance, it is sent by email.\n \"\"\"\n\n # --- Artifact class interface\n\n # todo: what is the class_id for?\n\n @classmethod\n def class_id(cls):\n \"\"\" String ID to be able to find this class from its name. \"\"\"\n return cls.__name__\n\n @classmethod\n def subclass_from_id(cls, class_id: str) -> Type['Artifact']:\n \"\"\" Find a subclass from its class ID. \"\"\"\n subclasses = cls.__subclasses__()\n for subclass in subclasses:\n if subclass.class_id() == class_id:\n break\n else:\n # todo this exception is not defined here\n raise InvalidArtifactClass(class_id)\n return subclass\n\n # --- Artifact public interface\n\n def __init__(self, data, metadata=None):\n \"\"\" Create an Artifact.\n\n Parameters\n ----------\n data: any\n The data of the artifact.\n metadata: dict\n User-defined metadata, saved with the artifact (optional).\n The metadata keys and values will be stored as strings.\n \"\"\"\n self.data = data\n if metadata is None:\n metadata = {}\n self.metadata = metadata\n\n @classmethod\n def read(cls, path, metadata=None, **kwargs):\n \"\"\" Reads the artifact from a file, given the path.\n\n Parameters\n ----------\n path: str\n Filename from which the artifact is read.\n metadata: dict or None\n The metadata for the artifact. If defined, it takes the place of any metadata\n defined in the artifact itself.\n Typically, this external artifact metadata comes from an artifact manifest. If the\n artifact has been written as a `pond` `VersionedArtifact`, then the two sources of\n metadata are identical.\n kwargs: dict\n Additional parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n with open(path, 'rb') as f:\n artifact = cls.read_bytes(f, metadata, **kwargs)\n return artifact\n\n @classmethod\n def read_bytes(cls, file_, metadata=None, **kwargs):\n \"\"\" Reads the artifact from a binary file.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object from which the artifact is read, opened in binary mode.\n metadata: dict or None\n The metadata for the artifact. If defined, it takes the place of any metadata\n defined in the artifact itself.\n Typically, this external artifact metadata comes from an artifact manifest. 
If the\n artifact has been written as a `pond` `VersionedArtifact`, then the two sources of\n metadata are identical.\n kwargs: dict\n Parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n artifact = cls._read_bytes(file_, **kwargs)\n if metadata is not None:\n artifact.metadata = metadata\n return artifact\n\n # todo why the kwargs\n def write(self, path, **kwargs):\n \"\"\" Writes the artifact to file.\n\n Parameters\n ----------\n path: str\n Path to which the artifact is written.\n kwargs: dict\n Parameters for the writer.\n\n \"\"\"\n with open(path, 'wb') as f:\n self.write_bytes(f, **kwargs)\n\n # --- Abstract interface\n\n @staticmethod\n @abstractmethod\n def filename(basename):\n \"\"\" Complete a base filename with an extension.\n\n Parameters\n ----------\n basename: str\n The filename without extension.\n\n Returns\n -------\n filename: str\n The completed filename.\n\n \"\"\"\n pass\n\n @classmethod\n @abstractmethod\n def _read_bytes(cls, file_, **kwargs):\n \"\"\" Reads the artifact from a binary file.\n\n This is a private method that loads the artifact from a binary file without dealing with\n the logic of the external metadata. It is called by `Artifact.read_bytes`.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object from which the artifact is read, opened in binary mode.\n kwargs: dict\n Parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n pass\n\n @abstractmethod\n def write_bytes(self, file_, **kwargs):\n \"\"\" Writes the artifact to binary file.\n\n This method also need to take care of writing the artifact metadata in the file itself,\n whenever possible.\n If the artifact is being written as a `pond` `VersionedArtifact`, then the metadata is also\n stored in an external manifest.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object to which the artifact is written, opened in binary mode.\n kwargs: dict\n Parameters for the writer.\n\n \"\"\"\n pass\n\n def get_artifact_metadata(self):\n \"\"\"\n This is not the user metadata!\n\n Returns\n -------\n\n \"\"\"\n return None"
},
{
"identifier": "ArtifactRegistry",
"path": "pond/artifact/artifact_registry.py",
"snippet": "class ArtifactRegistry:\n def __init__(self):\n def register(self, artifact_class, data_class, format=None):\n def get_available_artifacts(self, data_class):\n def get_artifact(self, data_class, format=None):"
},
{
"identifier": "DataType",
"path": "pond/conventions.py",
"snippet": "class WriteMode(str, Enum):\n OVERWRITE = 'overwrite'\n ERROR_IF_EXISTS = 'errorifexists'\nMANIFEST_FILENAME = 'manifest.yml'\nMETADATA_DIRNAME = '_pond'\nTXT_ENCODING = 'utf-8'\nVERSIONS_LOCK_FILENAME = '_VERSIONS_LOCK'\ndef urijoinpath(*parts: str) -> str:\ndef versioned_artifact_location(location: str, artifact_name: str):\ndef version_location(location: str, version_name: VersionName) -> str:\ndef versions_lock_file_location(location: str) -> str:\ndef version_data_location(version_location: str, data_filename: str) -> str:\ndef version_manifest_location(version_location: str) -> str:\ndef version_uri(datastore_id: str, location: str, artifact_name: str, version_name: VersionName):"
},
{
"identifier": "MetadataSource",
"path": "pond/metadata/metadata_source.py",
"snippet": "class MetadataSource:\n \"\"\" Represents a source of metadata.\n\n The metadata is collected using the `collect` method. Note that two calls to `collect` can\n return different values, as the metadata could be collected on the fly, as in the case of a\n time stamp, a git SHA, or other.\n\n Metadata keys and values must both be strings.\n \"\"\"\n\n @abstractmethod\n def section_name(self) -> str:\n \"\"\" Name of the section in the manifest corresponding to this metadata. \"\"\"\n return ''\n\n @abstractmethod\n def collect(self) -> dict[str, str]:\n \"\"\" Collect all the metadata in a dictionary.\n\n Keys and values must both be strings.\n \"\"\"\n return {}"
},
{
"identifier": "DictMetadataSource",
"path": "pond/metadata/dict.py",
"snippet": "class DictMetadataSource(MetadataSource):\n\n def __init__(self, name: str, metadata: dict[str, Any]):\n \"\"\" A dictionary used as source of metadata.\n\n Parameters\n ----------\n name: str\n The name of the section represented by this metadata source.\n metadata: dict[str, Any]\n The dictionary of metadata. Values will be converted to string.\n \"\"\"\n self.name = name\n self.metadata = metadata\n\n def section_name(self) -> str:\n return self.name\n\n def collect(self) -> dict[str, str]:\n return {k: str(v) for k,v in self.metadata.items()}"
},
{
"identifier": "Manifest",
"path": "pond/metadata/manifest.py",
"snippet": "class Manifest:\n\n # --- Manifest class interface\n\n def __init__(self):\n self._sections = {}\n\n @classmethod\n def from_yaml(cls, manifest_location, datastore):\n \"\"\"\n\n Parameters\n ----------\n manifest_location\n datastore\n\n Returns\n -------\n\n \"\"\"\n manifest_dict = datastore.read_yaml(manifest_location)\n return cls.from_nested_dict(manifest_dict)\n\n @classmethod\n def from_nested_dict(cls, manifest_dict: dict):\n manifest = cls()\n for section_name, metadata in manifest_dict.items():\n # TODO make this a FrozendictMetadataSource\n source = DictMetadataSource(name=section_name, metadata=metadata)\n manifest.add_section(source)\n return manifest\n\n # --- Manifest public interface\n\n def to_yaml(self, manifest_location, datastore):\n metadata = self.collect()\n datastore.write_yaml(manifest_location, metadata)\n\n def add_section(self, metadata_source):\n \"\"\"\n\n Parameters\n ----------\n metadata_source\n If None, nothing is added but no exception is raised.\n\n Returns\n -------\n\n \"\"\"\n if metadata_source is not None:\n self._sections[metadata_source.section_name()] = metadata_source\n\n def collect_section(self, name, default_metadata=None):\n source = self._sections.get(name, None)\n if source is None:\n metadata = default_metadata\n else:\n metadata = source.collect()\n return metadata\n\n def collect(self):\n dict_ = {}\n for name, source in self._sections.items():\n source_metadata = {k: str(v) for k, v in source.collect().items()}\n dict_[name] = source_metadata\n return dict_"
},
{
"identifier": "Datastore",
"path": "pond/storage/datastore.py",
"snippet": "class Datastore(ABC):\n \"\"\" Versioned storage for the artifacts.\n\n Parameters\n ----------\n id: str\n Unique identifier for the datastore. This is used in the URI for each versioned\n artifact to uniquely identify the artifact.\n \"\"\"\n\n # -- Datastore class interface\n\n def __init__(self, id: str):\n self.id = id\n\n # -- Abstract interface\n\n @abstractmethod\n def open(self, path: str, mode: str) -> IO[Any]:\n \"\"\" Open a file-like object\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n mode: str\n Specifies the mode in which the file is opened.\n\n Returns\n -------\n IO[Any]\n An open file-like object.\n\n \"\"\"\n pass\n\n @abstractmethod\n def read(self, path: str) -> bytes:\n \"\"\" Read a sequence of bytes from the data store.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n bytes\n The sequence of bytes read from `path`.\n\n Raises\n ------\n FileNotFoundError\n If the requested path does not exist.\n \"\"\"\n pass\n\n @abstractmethod\n def write(self, path: str, data: bytes) -> None:\n \"\"\" Write a sequence of bytes to the data store.\n\n `path` contains the path relative to the root of the data store, including the name\n of the file to be created. If a file already exists at `path`, it is overwritten.\n\n Intermediate directories that do not exist will be created.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n data: bytes\n Sequence of bytes to write at `path`.\n \"\"\"\n pass\n\n @abstractmethod\n def exists(self, path: str) -> bool:\n \"\"\" Returns True if the file exists.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n bool\n True if the file exists, false otherwise\n \"\"\"\n ...\n\n @abstractmethod\n def delete(self, path: str, recursive: bool = False) -> None:\n \"\"\"Deletes a file or directory\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n recursive: bool, optional, default is False\n Whether to recursively delete the location\n \"\"\"\n ...\n\n @abstractmethod\n def makedirs(self, path: str) -> None:\n \"\"\" Creates the specified directory if needed.\n\n If the directories already exist, the method does not do anything.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n \"\"\"\n ...\n\n # -- Read/write utility methods\n\n def read_string(self, path: str) -> str:\n \"\"\" Read a string from a file.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n str\n The read string\n\n Raises\n ------\n FileNotFound\n If the file cannot be found\n \"\"\"\n return self.read(path).decode(TXT_ENCODING)\n\n def write_string(self, path: str, content: str) -> None:\n \"\"\" Write a string to a file.\n\n Intermediate directories that do not exist will be created.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n content: str\n Content to write\n \"\"\"\n self.write(path, content.encode(TXT_ENCODING))\n\n def read_yaml(self, path: str) -> Any:\n \"\"\" Read and parse a YAML file.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n Any\n The parsed object\n\n Raises\n ------\n FileNotFound\n If the file cannot be found\n \"\"\"\n return yaml_load(self.read_string(path))\n\n def write_yaml(self, path: str, content: Any) -> 
None:\n \"\"\" Serializes to YAML and write an object to a file.\n\n Intermediate directories that do not exist will be created.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n content: Any\n Content to write\n \"\"\"\n return self.write_string(path, yaml_dump(content))\n\n def read_json(self, path: str) -> Any:\n \"\"\" Read and parse a JSON file.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n Any\n The parsed object\n\n Raises\n ------\n FileNotFound\n If the file cannot be found\n \"\"\"\n return json.loads(self.read_string(path))\n\n def write_json(self, path: str, content: Any) -> None:\n \"\"\"Serializes to JSON and write an object to a file\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n content: Any\n Content to write\n \"\"\"\n return self.write_string(path, json.dumps(content, separators=(',', ':')))"
},
{
"identifier": "Version",
"path": "pond/version.py",
"snippet": "class Version:\n\n def __init__(self, artifact_name: str, version_name: VersionName, artifact: Artifact,\n manifest: Optional[Manifest] = None):\n \"\"\" Manages a version: its manifest, name, and artifact.\n \"\"\"\n self.artifact_name = artifact_name\n self.version_name = version_name\n self.manifest = manifest\n self.artifact = artifact\n\n def get_metadata(self, location, datastore, data_filename):\n version_metadata = {\n 'uri': self.get_uri(location, datastore),\n 'filename': data_filename,\n 'date_time': datetime.datetime.now(),\n 'artifact_name': self.artifact_name,\n }\n version_metadata_source = DictMetadataSource(name='version', metadata=version_metadata)\n return version_metadata_source\n\n def write(self, location: str, datastore: Datastore, manifest: Manifest):\n # TODO: manifest is modified in-place, is that an issue?\n\n #: location of the version folder\n version_location_ = version_location(location, self.version_name)\n #: location of the manifest file\n manifest_location = version_manifest_location(version_location_)\n\n #: filename for the saved data\n data_basename = f'{self.artifact_name}_{str(self.version_name)}'\n data_filename = self.artifact.filename(data_basename)\n\n version_metadata_source = self.get_metadata(location, datastore, data_filename)\n manifest.add_section(version_metadata_source)\n artifact_metadata_source = self.artifact.get_artifact_metadata()\n manifest.add_section(artifact_metadata_source)\n manifest.to_yaml(manifest_location, datastore)\n\n datastore.makedirs(version_location_)\n data_location = version_data_location(version_location_, data_filename)\n with datastore.open(data_location, 'wb') as f:\n self.artifact.write_bytes(f)\n\n # save stored manifest\n self.manifest = manifest\n\n # todo store and recover artifact_class from manifest\n @classmethod\n def read(cls, version_name, artifact_class, location, datastore):\n #: location of the version folder\n version_location_ = version_location(location, version_name)\n #: location of the manifest file\n manifest_location = version_manifest_location(version_location_)\n\n if not datastore.exists(manifest_location):\n raise VersionDoesNotExist(location, str(version_name))\n manifest = Manifest.from_yaml(manifest_location, datastore)\n\n version_metadata = manifest.collect_section('version')\n data_filename = version_metadata['filename']\n data_location = version_data_location(version_location_, data_filename)\n user_metadata = manifest.collect_section('user')\n with datastore.open(data_location, 'rb') as f:\n artifact = artifact_class.read_bytes(f, metadata=user_metadata)\n\n version = cls(\n artifact_name=version_metadata['artifact_name'],\n version_name=version_name,\n artifact=artifact,\n manifest=manifest,\n )\n\n return version\n\n def get_uri(self, location, datastore):\n \"\"\" Build URI for a specific location and datastore. \"\"\"\n uri = version_uri(datastore.id, location, self.artifact_name, self.version_name)\n return uri\n\n def exists(self, location: str, datastore: Datastore):\n \"\"\" Does this version already exists on disk?\n\n Parameters\n ----------\n location: str\n Root location in the data store where artifacts are read/written. This is used to\n create folder-like groups inside a datastore. 
This can be, for instance, the name of\n a project or experiment.\n datastore: Datastore\n Data store object, representing the location where the artifacts are read/written.\n \"\"\"\n #: location of the version folder\n version_location_ = version_location(location, self.version_name)\n #: location of the manifest file\n manifest_location = version_manifest_location(version_location_)\n\n return datastore.exists(manifest_location)"
},
{
"identifier": "SimpleVersionName",
"path": "pond/version_name.py",
"snippet": "class SimpleVersionName(VersionName):\n \"\"\"Simple version name are just an integer number (greater than 0) prefixed with \"v\" when\n rendered as string.\"\"\"\n\n _FORMAT = re.compile('^v?([1-9][0-9]*)$')\n\n # --- VersionName class interface\n\n @classmethod\n def from_string(cls, version_name: str) -> 'SimpleVersionName':\n match = SimpleVersionName._FORMAT.match(version_name)\n if not match:\n raise InvalidVersionName(version_name)\n return cls(int(match[1]))\n\n @classmethod\n def next(cls, prev: Optional['VersionName'] = None) -> VersionName:\n if prev is None:\n next_ = SimpleVersionName(1)\n elif not isinstance(prev, SimpleVersionName):\n raise IncompatibleVersionName(prev, SimpleVersionName)\n else:\n next_ = SimpleVersionName(prev.version_number + 1)\n return next_\n\n def __init__(self, version_number: int):\n self.version_number = version_number\n\n # -- VersionName protected interface\n\n def _partial_compare(self, other: VersionName) -> Optional[int]:\n if isinstance(other, SimpleVersionName):\n return 0 if self.version_number == other.version_number else (\n -1 if self.version_number < other.version_number else 1)\n return None\n\n # -- Magic methods\n\n def __hash__(self) -> int:\n return hash(self.version_number)\n\n def __str__(self) -> str:\n return f'v{self.version_number}'"
},
{
"identifier": "VersionName",
"path": "pond/version_name.py",
"snippet": "class VersionName(ABC):\n \"\"\" Base class for all kind of version naming conventions.\n\n It defines a way to sort version names and compute the next one.\n \"\"\"\n\n # --- VersionName class interface\n\n @classmethod\n def class_id(cls):\n \"\"\" String ID to be able to find this class from its name. \"\"\"\n return cls.__name__\n\n @classmethod\n def subclass_from_id(cls, class_id: str) -> Type['Artifact']:\n \"\"\" Find a subclass from its class ID. \"\"\"\n subclasses = cls.__subclasses__()\n for subclass in subclasses:\n if subclass.class_id() == class_id:\n break\n else:\n raise InvalidVersionName(class_id)\n return subclass\n\n @classmethod\n def from_string(cls, version_name: str) -> 'VersionName':\n \"\"\"Parses a string into a version name.\n\n Parameters\n ----------\n version_name: str\n Version name as a string that needs to be parsed\n\n Returns\n -------\n VersionName\n The parsed version name\n\n Raises\n ------\n InvalidVersionName\n If the version name cannot be parsed\n \"\"\"\n # Only first-level subclasses for the moment, it should be sufficient\n # At the same time, we give up defining a version name priority, and will return the\n # first VersionName subclass that can parse the string\n # TODO: remove the magic\n subclasses = cls.__subclasses__()\n for subclass in subclasses:\n try:\n version = subclass.from_string(version_name)\n break\n except InvalidVersionName:\n pass\n else:\n raise InvalidVersionName(version_name)\n return version\n\n @classmethod\n @abstractmethod\n def next(cls, prev: 'VersionName') -> 'VersionName':\n \"\"\" Generate a new version name given a previous one.\n\n If `prev` is None, this method will generate a first version name.\n\n Some subclasses of `VersionName` will ignore the argument `prev`, except in case of\n collision (e.g., datetime version names).\n\n Parameters\n ----------\n prev: Optional['VersionName']\n The previous version name.\n\n Returns\n -------\n VersionName\n A new version name.\n \"\"\"\n ...\n\n @classmethod\n def first(cls) -> 'VersionName':\n \"\"\" Generate the first version name.\n\n Alias for `VersionName.next(None)`.\n\n Returns\n -------\n VersionName\n The first version name.\n \"\"\"\n return cls.next(prev=None)\n\n # --- VersionName protected interface\n\n @abstractmethod\n def _partial_compare(self, that: 'VersionName') -> Optional[int]:\n ...\n\n # --- Magic methods\n\n def __cmp__(self, other: 'VersionName') -> int:\n cmp = self._partial_compare(other)\n return cmp if cmp is not None else _compare_classnames(self, other)\n\n def __eq__(self, other: Any) -> bool:\n return self._partial_compare(other) == 0\n\n def __ne__(self, other: Any) -> bool:\n return self._partial_compare(other) != 0\n\n def __lt__(self, other: Any) -> bool:\n return self.__cmp__(other) < 0\n\n def __le__(self, other: Any) -> bool:\n return self.__cmp__(other) <= 0\n\n def __gt__(self, other: Any) -> bool:\n return self.__cmp__(other) > 0\n\n def __ge__(self, other: Any) -> bool:\n return self.__cmp__(other) >= 0\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(\"{str(self)}\")'"
},
{
"identifier": "VersionedArtifact",
"path": "pond/versioned_artifact.py",
"snippet": "class VersionedArtifact:\n\n def __init__(self,\n artifact_name: str,\n location: str,\n datastore: Datastore,\n artifact_class: Type[Artifact],\n version_name_class: Type[VersionName]):\n \"\"\" An artifact versioned and stored on disk.\n\n `VersionedArtifact` manages the versioning, data, and metadata, of an artifact.\n\n Parameters\n ----------\n artifact_name: str\n Name of the artifact.\n location: str\n Root location in the data store where artifacts are read/written. This is used to\n create folder-like groups inside a datastore. This can be, for instance, the name of\n a project or experiment.\n datastore: Datastore\n Data store object, representing the storage where the artifacts are read/written.\n artifact_class: Type[Artifact]\n version_name_class: Type[VersionName]\n Class used to create increasing version names. The default value,\n `SimpleVersionName` creates version names as `v1`, `v2`, etc.\n \"\"\"\n self.artifact_name = artifact_name\n self.location = location\n self.datastore = datastore\n self.artifact_class = artifact_class\n self.version_name_class = version_name_class\n\n self.versions_manifest = {\n 'artifact_class': artifact_class.class_id(),\n 'version_name_class': version_name_class.class_id(),\n }\n\n self.versions_location = versioned_artifact_location(location, artifact_name)\n # todo this goes to conventions.py\n self.versions_list_location = f'{self.versions_location}/versions.json'\n self.versions_manifest_location = f'{self.versions_location}/manifest.yml'\n\n if not self.datastore.exists(self.versions_location):\n # Create the versioned artifact folder organization if it does not exist\n self.datastore.makedirs(self.versions_location)\n self._write_version_names([])\n self.versions_manifest['artifact_class'] = artifact_class.class_id()\n self.versions_manifest['version_name_class'] = version_name_class.class_id()\n self._write_manifest()\n\n # --- VersionedArtifact class interface\n\n @classmethod\n def from_datastore(cls, artifact_name: str, location: str, datastore: Datastore):\n versions_location = versioned_artifact_location(location, artifact_name)\n versions_manifest_location = f'{versions_location}/manifest.yml'\n versions_manifest = datastore.read_yaml(versions_manifest_location)\n\n artifact_class_id = versions_manifest['artifact_class']\n artifact_class = Artifact.subclass_from_id(artifact_class_id)\n version_name_class_id = versions_manifest['version_name_class']\n version_name_class = VersionName.subclass_from_id(version_name_class_id)\n\n versioned_artifact = cls(\n artifact_name=artifact_name,\n location=location,\n datastore=datastore,\n artifact_class=artifact_class,\n version_name_class=version_name_class,\n )\n return versioned_artifact\n\n # --- VersionedArtifact public interface\n\n def read(self, version_name: Optional[Union[str, VersionName]] = None) -> Version:\n \"\"\" Read a version of the artifact.\n\n Parameters\n ----------\n version_name: Union[str, VersionName], optional\n Version name, given as a string (more common) or as VersionName instance. 
If None,\n the latest version name for the given artifact is used.\n\n Raises\n ------\n VersionDoesNotExist\n If the requested version does not exist.\n\n Returns\n -------\n Version\n The version object read from storage.\n \"\"\"\n\n if version_name is not None:\n if isinstance(version_name, str):\n version_name = self.version_name_class.from_string(version_name)\n else:\n version_name = self.latest_version_name()\n\n version = Version.read(\n version_name=version_name,\n artifact_class=self.artifact_class,\n datastore=self.datastore,\n location=self.versions_location,\n )\n\n return version\n\n def write(self,\n data: DataType,\n manifest: Manifest,\n version_name: Optional[Union[str, VersionName]] = None,\n write_mode: WriteMode = WriteMode.ERROR_IF_EXISTS):\n \"\"\" Write some data to storage.\n\n Parameters\n ----------\n data: DataType\n The artifact data to write.\n manifest: Manifest\n Metadata to store with the data.\n version_name: Union[str, VersionName], optional\n Version name, given as a string (more common) or as VersionName instance. If None,\n the latest version name for the given artifact is used.\n write_mode: WriteMode\n Write mode, either WriteMode.ERROR_IF_EXISTS or WriteMode.OVERWRITE.\n\n Raises\n ------\n IncompatibleVersionName\n If the provided version name does not correspond to the version name class used in\n this versioned artifact.\n VersionAlreadyExists\n If the provided version name exists, and the write mode is \"ERROR_IF_EXISTS\".\n\n Returns\n -------\n Version\n The version object read from storage.\n \"\"\"\n # todo lock\n\n if version_name is None:\n prev_version_name = self.latest_version_name(raise_if_none=False)\n version_name = self.version_name_class.next(prev_version_name)\n\n if isinstance(version_name, str):\n version_name = VersionName.from_string(version_name)\n\n if not isinstance(version_name, self.version_name_class):\n raise IncompatibleVersionName(\n version_name=version_name,\n version_name_class=self.version_name_class,\n )\n\n user_metadata = manifest.collect_section('user', default_metadata={})\n artifact = self.artifact_class(data, metadata=user_metadata)\n version = Version(self.artifact_name, version_name, artifact)\n\n if version.exists(self.versions_location, self.datastore):\n if write_mode == WriteMode.ERROR_IF_EXISTS:\n uri = version.get_uri(self.location, self.datastore)\n raise VersionAlreadyExists(uri)\n elif write_mode == WriteMode.OVERWRITE:\n uri = version.get_uri(self.location, self.datastore)\n logger.info(f\"Deleting existing version before overwriting: {uri}\")\n version_location_ = version_location(self.versions_location, version_name)\n self.datastore.delete(version_location_, recursive=True)\n\n version.write(self.versions_location, self.datastore, manifest)\n self._register_version_name(version_name)\n\n return version\n\n def all_version_names(self) -> List[VersionName]:\n \"\"\"Get all locked (and existing) artifact version names.\n\n Locked versions might not be existing yet, they are just reserved names.\n\n Returns\n -------\n List[VersionName]\n A list of all locked version names\n \"\"\"\n try:\n raw_versions = json.loads(self.datastore.read(self.versions_list_location))\n except FileNotFoundError:\n raw_versions = []\n versions = [VersionName.from_string(raw_version) for raw_version in list(raw_versions)]\n return sorted(versions)\n\n def version_names(self) -> List[VersionName]:\n \"\"\"Get all existing artifact version names.\n\n Versions are considered as \"existing\" as soon as they have a 
\"manifest.yml\"\n\n Returns\n -------\n List[VersionName]\n A list of all existing version names\n \"\"\"\n # todo create version_exists\n return [\n name for name in self.all_version_names()\n if self.datastore.exists(\n version_manifest_location(\n version_location(self.versions_location, name)\n )\n )\n ]\n\n def latest_version_name(self, raise_if_none=True) -> VersionName:\n \"\"\"Get the name of the latest version. If none is defined, will raise an exception\n\n Raises\n ------\n ArtifactHasNoVersion\n If the artifact has no latest version\n\n Returns\n -------\n VersionName\n The name of the latest version\n \"\"\"\n versions = self.version_names()\n if not versions:\n if raise_if_none:\n raise ArtifactHasNoVersion(self.location)\n else:\n return None\n return versions[-1]\n\n def latest_version(self) -> Version:\n \"\"\"Get the latest version. If none is defined, will raise an exception\n\n Raises\n ------\n TableHasNoVersion\n If the artifact has no latest version\n\n Returns\n -------\n Version\n The latest version of this artifact\n \"\"\"\n return self.read(self.latest_version_name())\n\n # TODO: TEST\n def delete_version(self, version_name: Union[str, VersionName]) -> None:\n \"\"\"Delete a version, will not fail if the version did not exist\n\n Parameters\n ----------\n version_name: Union[str, VersionName]\n Name of the version to delete\n \"\"\"\n if not isinstance(version_name, VersionName):\n version_name = VersionName.from_string(version_name)\n\n self.datastore.delete(version_location(self.location, version_name), recursive=True)\n\n # todo: need to lock versions.json here\n names = self.all_version_names()\n if version_name in names:\n names.remove(version_name)\n self._write_version_names(names)\n # todo: need to unlock versions.json here\n\n # --- VersionedArtifact private interface\n\n def _create_version_name(self, retry: bool = True) -> VersionName:\n versions_lock_file = versions_lock_file_location(self.location)\n if self.datastore.exists(versions_lock_file):\n # In case another process just created the data dir and did non update yet the versions\n # list, let's wait a little and retry once\n if retry:\n time.sleep(NEW_VERSION_WAIT_MS / 1000)\n return self._create_version_name(False)\n else:\n raise ArtifactVersionsIsLocked(self.location)\n # todo: this is not safe in case of concurrency.\n self.datastore.write_string(versions_lock_file, '')\n try:\n names = self.all_version_names()\n name = names[-1].next() if names else FIRST_VERSION_NAME\n new_version_name = self._register_version_name(name)\n finally:\n self.datastore.delete(versions_lock_file)\n\n return new_version_name\n\n def _register_version_name(self, name: VersionName) -> VersionName:\n # todo: need to lock versions.json here\n names = self.all_version_names()\n\n if name not in names:\n names.append(name)\n self._write_version_names(names)\n # todo: need to unlock versions.json here\n\n return name\n\n def _write_version_names(self, names: List[VersionName]) -> None:\n \"\"\"Sort, serialize and write version names\"\"\"\n strings = [str(name) for name in sorted(names)]\n self.datastore.write_json(self.versions_list_location, strings)\n\n def _write_manifest(self):\n self.datastore.write_yaml(self.versions_manifest_location, self.versions_manifest)\n\n def _read_manifest(self):\n return self.datastore.read_yaml(self.versions_manifest_location)"
}
] | from typing import Any, Dict, Optional, Set, Type, Union
from pond.artifact import Artifact
from pond.artifact.artifact_registry import ArtifactRegistry, global_artifact_registry
from pond.conventions import DataType, WriteMode
from pond.metadata.metadata_source import MetadataSource
from pond.metadata.dict import DictMetadataSource
from pond.metadata.manifest import Manifest
from pond.storage.datastore import Datastore
from pond.version import Version
from pond.version_name import SimpleVersionName, VersionName
from pond.versioned_artifact import VersionedArtifact | 8,776 |
class Activity:
# TODO: can location have subpaths? e.g. `experiment1/test22`
def __init__(self,
source: str,
location: str,
datastore: Datastore,
author: str='NA',
|
class Activity:
# TODO: can location have subpaths? e.g. `experiment1/test22`
def __init__(self,
source: str,
location: str,
datastore: Datastore,
author: str='NA', | version_name_class: Type[VersionName] = SimpleVersionName, | 8 | 2023-12-24 13:05:58+00:00 | 12k |
demirogun/pyethnobiology | pyethnobiology/pyethnobiology.py | [
{
"identifier": "UR",
"path": "pyethnobiology/indices.py",
"snippet": "class UR:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Use Report (UR) per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the UR for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and UR columns.\n \"\"\"\n\n ur_df = (\n self.data.groupby(self.taxon_column, observed=True)\n .size()\n .reset_index(name=\"UR\")\n .sort_values(by=\"UR\", ascending=False)\n .reset_index(drop=True)\n )\n return ur_df\n\n def save_data(self):\n UR_df = self.calculate()\n UR_df.to_csv(\"use_report_UR.csv\", index=False)\n print(\"Saved to use_report_UR.csv\")\n\n def plot_radial(self, filename=\"UR.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"UR\", num_row, ytick_position, colors, show_colorbar,\n self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "CI",
"path": "pyethnobiology/indices.py",
"snippet": "class CI:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs. Defaults to \"informant\".\n taxon_column (str, optional): Name of the column containing species names. Defaults to \"taxon\".\n use_column (str, optional): Name of the column containing plant uses. Defaults to \"ailments_treated\".\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Cultural Importance (CI) Index\"\n\n def calculate(self):\n \"\"\"\n Calculates the cultural importance index (CI) for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and CI columns.\n \"\"\"\n\n # Calculate Use Reports (UR) per species\n ur_df = UR(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Count unique informants\n informants_count = self.data[self.informant_column].nunique()\n\n # Merge UR and informants count based on 'taxon'\n ci_df = pd.merge(\n ur_df,\n self.data[[self.taxon_column, self.informant_column]]\n .drop_duplicates()\n .groupby(self.taxon_column, observed=False)\n .size()\n .reset_index(name=f\"{self.informant_column}s_count\"),\n on=self.taxon_column,\n )\n\n # Calculate CI index (UR divided by the number of informants)\n ci_df[\"CI\"] = ci_df[\"UR\"] / informants_count\n\n # Keep only relevant columns\n ci_df = ci_df[[self.taxon_column, \"CI\"]]\n\n return ci_df\n\n def save_data(self):\n CI_df = self.calculate()\n CI_df.to_csv(\"cultural_importance_CI.csv\", index=False)\n print(\"Saved to cultural_importance_CI.csv\")\n\n def plot_radial(self, filename=\"CI.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"CI\", num_row, ytick_position,\n colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "FC",
"path": "pyethnobiology/indices.py",
"snippet": "class FC:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs. Defaults to \"informant\".\n taxon_column (str, optional): Name of the column containing species names. Defaults to \"taxon\".\n use_column (str, optional): Name of the column containing plant uses. Defaults to \"ailments_treated\".\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n\n def calculate(self):\n \"\"\"\n Calculates the frequency of citation (FC) for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and FC columns.\n \"\"\"\n\n # Calculate FC per species by counting unique informants for each taxon\n fc_df = (\n self.data.groupby(self.taxon_column, observed=True)[self.informant_column]\n .nunique()\n .reset_index(name=\"FC\")\n )\n\n # Sort FC values in descending order\n fc_df = fc_df.sort_values(by=\"FC\", ascending=False).reset_index(drop=True)\n\n return fc_df\n\n def save_data(self):\n FC_df = self.calculate()\n FC_df.to_csv(\"frequency_of_citation_FC.csv\", index=False)\n print(\"Saved to frequency_of_citation_FC.csv\")\n\n def plot_radial(self, filename=\"FC.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), \"Frequency of Citation (FC)\", \"FC\", num_row, ytick_position, colors,\n show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "NU",
"path": "pyethnobiology/indices.py",
"snippet": "class NU:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Number of Uses (NU) per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the NU for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and NU columns.\n \"\"\"\n\n nu_df = (\n self.data.groupby(self.taxon_column, observed=True)[self.use_column]\n .nunique()\n .reset_index(name=\"NU\")\n )\n nu_df = nu_df.sort_values(by=\"NU\", ascending=False).reset_index(drop=True)\n return nu_df\n def save_data(self):\n NU_df = self.calculate()\n NU_df.to_csv(\"number_of_uses_NU.csv\", index=False)\n print(\"Saved to number_of_uses_NU.csv\")\n\n def plot_radial(self, filename=\"NU.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"NU\", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "RFC",
"path": "pyethnobiology/indices.py",
"snippet": "class RFC:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Relative Frequency of Citation (RFC) per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the RFC for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and RFC columns.\n \"\"\"\n\n # Get frequency of citation (FC) for each species\n fc_df = FC(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Get total number of informants\n total_informants = self.data[self.informant_column].nunique()\n\n # Calculate use reports (UR) for each species\n ur_df = (\n self.data[[self.taxon_column, self.informant_column]]\n .groupby(self.taxon_column, observed=True)\n .size()\n .reset_index(name=\"UR\")\n )\n\n # Merge FC, UR, and total informants\n rfc_df = pd.merge(fc_df, ur_df, on=self.taxon_column)\n rfc_df[\"RFC\"] = rfc_df[\"FC\"] / total_informants\n\n # Keep only taxon and RFC columns\n rfc_df = rfc_df[[self.taxon_column, \"RFC\"]]\n return rfc_df\n\n def save_data(self):\n RFC_df = self.calculate()\n RFC_df.to_csv(\"relative_frequency_of_citation_RFC.csv\", index=False)\n print(\"Saved to relative_frequency_of_citation_RFC.csv\")\n\n def plot_radial(self, filename=\"RFC.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"RFC\", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "RI",
"path": "pyethnobiology/indices.py",
"snippet": "class RI:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Relative Importance (RI) Index per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the RI for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and RI columns.\n \"\"\"\n\n # Get RFC and NU for each species\n rfc_df = RFC(\n self.data, self.informant_column, self.taxon_column, self.use_column\n ).calculate()\n nu_df = NU(\n self.data, self.informant_column, self.taxon_column, self.use_column\n ).calculate()\n\n # Normalize RFC and NU\n max_rfc = rfc_df[\"RFC\"].max()\n max_nu = nu_df[\"NU\"].max()\n rfc_df[\"RFC(max)\"] = rfc_df[\"RFC\"] / max_rfc\n nu_df[\"RNU(max)\"] = nu_df[\"NU\"] / max_nu\n\n # Merge RFC(max) and RNU(max)\n ri_df = pd.merge(\n rfc_df[[self.taxon_column, \"RFC(max)\"]],\n nu_df[[self.taxon_column, \"RNU(max)\"]],\n on=self.taxon_column,\n )\n\n # Calculate RI index\n ri_df[\"RI\"] = (ri_df[\"RFC(max)\"] + ri_df[\"RNU(max)\"]) / 2\n\n # Sort and return RI values\n ri_df = ri_df.sort_values(by=\"RI\", ascending=False).reset_index(drop=True)\n return ri_df[[self.taxon_column, \"RI\"]]\n\n def save_data(self):\n RI_df = self.calculate()\n RI_df.to_csv(\"relative_importance_RI.csv\", index=False)\n print(\"Saved to relative_importance_RI.csv\")\n\n def plot_radial(self, filename=\"RI.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"RI\", num_row, ytick_position, colors, show_colorbar,\n self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "UV",
"path": "pyethnobiology/indices.py",
"snippet": "class UV:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Use Value (UV) per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the UV for each species.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and UV columns.\n \"\"\"\n UV_df = CI(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n UV_df = UV_df.rename(columns={\"CI\": \"UV\"})\n return UV_df\n\n def save_data(self):\n UV_df = self.calculate()\n UV_df.to_csv(\"use_value_UV.csv\", index=False)\n print(\"Saved to use_value_UV.csv\")\n\n def plot_radial(self, filename=\"UV.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"UV\", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "CV",
"path": "pyethnobiology/indices.py",
"snippet": "class CV:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs. Defaults to \"informant\".\n taxon_column (str, optional): Name of the column containing species names. Defaults to \"taxon\".\n use_column (str, optional): Name of the column containing plant uses. Defaults to \"ailments_treated\".\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Cultural Value (CV) for Ethnospecies\"\n\n def calculate(self):\n \"\"\"\n Calculates the cultural value (CV) for each ethnospecies.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon and CV columns.\n \"\"\"\n\n # Calculate Use Reports (UR) per species\n ur_df = UR(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Calculate Number of Uses (NU) per species\n nu_df = NU(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Calculate Frequency of Citation (FC) per species\n fc_df = FC(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Calculate Uce (Use Citation for Ethnospecies)\n potential_uses = self.data[self.use_column].nunique()\n nu_df[\"Uce\"] = nu_df[\"NU\"] / potential_uses\n\n # Calculate Ice (Informant Citation Index)\n ice = fc_df[\"FC\"] / self.data[self.informant_column].nunique()\n fc_df[\"Ice\"] = ice\n\n # Calculate IUce (Informant Use Index)\n iuce = ur_df[\"UR\"] / self.data[self.informant_column].nunique()\n ur_df[\"IUce\"] = iuce\n\n # Merge dataframes to calculate CV\n merged_df = pd.merge(nu_df[[self.taxon_column, \"Uce\"]], ur_df[[self.taxon_column, \"IUce\"]], on=self.taxon_column)\n merged_df = pd.merge(merged_df, fc_df[[self.taxon_column, \"Ice\"]], on=self.taxon_column)\n\n # Calculate CV = Uce * Ice * IUce\n merged_df[\"CV\"] = merged_df[\"Uce\"] * merged_df[\"Ice\"] * merged_df[\"IUce\"]\n\n # Sort and round CV values\n cv_df = merged_df[[self.taxon_column, \"CV\"]].sort_values(by=\"CV\", ascending=False)\n\n return cv_df\n\n def save_data(self):\n CV_df = self.calculate()\n CV_df.to_csv(\"cultural_value_CV.csv\", index=False)\n print(\"Saved to cultural_value_CV.csv\")\n\n def plot_radial(self, filename=\"CV.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"CV\", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "FL",
"path": "pyethnobiology/indices.py",
"snippet": "class FL:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs. Defaults to \"informant\".\n taxon_column (str, optional): Name of the column containing species names. Defaults to \"taxon\".\n use_column (str, optional): Name of the column containing plant uses. Defaults to \"ailments_treated\".\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Fidelity Level (FL) per Species\"\n\n def calculate(self):\n \"\"\"\n Calculates the fidelity level (FL) for each species-use combination.\n\n Returns:\n pd.DataFrame: DataFrame containing taxon, use, and FL columns.\n \"\"\"\n\n # Calculate Frequency of Citation (FC) per species\n fc_df = FC(self.data, self.informant_column, self.taxon_column, self.use_column).calculate()\n\n # Count informants for each species-use combination\n ns_df = (\n self.data.groupby([self.taxon_column, self.use_column])[self.informant_column]\n .nunique()\n .reset_index(name=\"Ns\")\n )\n\n # Merge FC and Ns dataframes\n merged_df = pd.merge(ns_df, fc_df, on=self.taxon_column)\n\n # Calculate FL = (Ns * 100) / FC\n merged_df[\"FL\"] = (merged_df[\"Ns\"] * 100) / merged_df[\"FC\"]\n\n # Exclude rows with FL of 0\n merged_df = merged_df[merged_df[\"FL\"] != 0]\n\n return merged_df[[self.taxon_column, self.use_column, \"FL\"]]\n\n def save_data(self, filename=\"fidelity_level_FL.csv\"):\n \"\"\"\n Saves the calculated FL data to a CSV file.\n\n Args:\n filename (str, optional): Name of the CSV file to save. Defaults to \"fidelity_level_FL.csv\".\n \"\"\"\n\n fl_df = self.calculate()\n fl_df.to_csv(filename, index=False)\n print(f\"Saved to {filename}\")\n\n def plot_heatmap(self,\n filename=\"FL.png\",\n cmap=\"coolwarm\",\n show_colorbar=True,\n colorbar_shrink=0.50,\n plot_width=10,\n plot_height=8,\n dpi=300,\n fillna_zero=True):\n \"\"\"\n Creates a heatmap of FL values for each species-use combination,\n with customizable features for plot appearance and layout.\n \"\"\"\n\n data = self.calculate()\n heatmap_plot = HeatmapPlot(\n data=data,\n title=\"Fidelity Level (FL)\",\n value_column=\"FL\",\n row_column=self.taxon_column,\n column_column=self.use_column,\n cmap=cmap,\n show_colorbar=show_colorbar,\n colorbar_shrink=colorbar_shrink,\n plot_width=plot_width,\n plot_height=plot_height,\n dpi=dpi,\n fillna_zero=fillna_zero,\n )\n heatmap_plot.save_plot(filename, dpi=dpi)\n return heatmap_plot.plot()"
},
{
"identifier": "FIC",
"path": "pyethnobiology/indices.py",
"snippet": "class FIC:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:\n data (pd.DataFrame): DataFrame containing plant usage information.\n informant_column (str, optional): Name of the column containing informant IDs.\n taxon_column (str, optional): Name of the column containing species names.\n use_column (str, optional): Name of the column containing plant uses.\n \"\"\"\n\n self.data = data\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n self.title = \"Informant Consensus Factor (FIC)\"\n\n def calculate(self):\n \"\"\"\n Calculates the FIC for each ailment category.\n\n Returns:\n pd.DataFrame: DataFrame containing ailment category and FIC columns.\n \"\"\"\n\n unique_ailment_categories = self.data[self.use_column].unique()\n fic_values = []\n\n for ailment_category in unique_ailment_categories:\n specific_data = self.data[self.data[self.use_column] == ailment_category]\n\n # Calculate Nur (number of use reports)\n nur = specific_data.shape[0]\n\n # Calculate Nt (number of taxa used)\n nt = specific_data[self.taxon_column].nunique()\n\n # Calculate FIC value\n if nur > nt:\n fic = (nur - nt) / (nur - 1)\n else:\n fic = 0 # Set FIC to 0 if Nur <= Nt\n\n fic_values.append({self.use_column: ailment_category, \"FIC\": fic})\n\n fic_df = pd.DataFrame(fic_values)\n fic_df = fic_df.sort_values(by=\"FIC\", ascending=False).reset_index(drop=True)\n return fic_df\n\n def save_data(self):\n FIC_df = self.calculate()\n FIC_df.to_csv(\"informant_consensus_factor_FIC.csv\", index=False)\n print(\"Saved to informant_consensus_factor_FIC.csv\")\n\n def plot_radial(self, filename=\"FIC.png\", dpi=300, num_row=10, ytick_position=\"onbar\", colors=None, show_colorbar=True):\n # Plot radial bar chart\n radial_plot = RadialPlot(self.calculate(), self.title, \"FIC\", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column)\n radial_plot.save_plot(filename, dpi=dpi)\n radial_plot.plot()"
},
{
"identifier": "Jaccard",
"path": "pyethnobiology/stats.py",
"snippet": "class Jaccard:\n def __init__(self, data: pd.DataFrame):\n self.data = data\n\n def convert_data(self, literature_column: str, taxon_column: str, use_column: str) -> pd.DataFrame:\n \"\"\"Converts data to a specified format, handling varying ailment names and extracting literature references.\n\n Args:\n data (pd.DataFrame): The input DataFrame containing the data to be converted.\n literature_column (str): The name of the column containing literature references.\n taxon_column (str): The name of the column containing taxon names.\n use_column (str): The name of the column containing ailment names.\n\n Returns:\n pd.DataFrame: The converted DataFrame with the following columns:\n - \"study\": Study identifier (either \"My Study\" or literature references)\n - taxon_column (str): Taxon name\n - Ailment columns (one for each unique ailment): 0 or 1 indicating presence/absence\n \"\"\"\n\n # Ensure literature column is string type\n if not pd.api.types.is_string_dtype(self.data[literature_column]):\n self.data[literature_column] = self.data[literature_column].astype(str)\n\n # Create an empty DataFrame with the desired columns\n converted_data = pd.DataFrame(columns=[\"study\", taxon_column])\n unique_ailments = set(self.data[use_column])\n for ailment in unique_ailments:\n converted_data[ailment] = 0 # Add columns for all unique ailments\n\n # Iterate through each row efficiently using itertuples\n for row in self.data.itertuples():\n taxon = getattr(row, taxon_column)\n use = getattr(row, use_column)\n\n # Extract literature references (handling potential errors)\n try:\n literature_references = getattr(row, literature_column).split(\";\")\n except (AttributeError, ValueError):\n literature_references = []\n\n # Create rows for \"My Study\" and literature references\n rows_to_add = [\n {\"study\": \"My Study\", taxon_column: taxon, use: 1} # Row for \"My Study\"\n ]\n rows_to_add.extend(\n {\n \"study\": ref,\n taxon_column: taxon,\n use: 1, # Set the relevant ailment column to 1\n }\n for ref in literature_references\n )\n\n # Concatenate new rows efficiently using list comprehension\n converted_data = pd.concat(\n [\n converted_data,\n pd.DataFrame(rows_to_add), # Create a DataFrame from the list of rows\n ],\n ignore_index=True,\n )\n\n # Fill missing values with 0 and group data\n converted_data = converted_data.fillna(0).groupby([\"study\", taxon_column]).sum().clip(upper=1)\n\n return converted_data\n\n def fill_missing_taxa_dynamic(self) -> pd.DataFrame:\n\n \"\"\"Fills missing taxa in a DataFrame with appropriate ailment values based on other studies.\n\n Args:\n data (pd.DataFrame): The input DataFrame containing the data to be processed.\n\n Returns:\n pd.DataFrame: The DataFrame with missing taxa filled in.\n \"\"\"\n\n study_data = {}\n ailment_names = list(self.data.columns[:-2]) # Get ailment names from DataFrame columns\n\n for index, row in self.data.iterrows():\n study_name = row['study']\n taxon = row['taxon']\n ailments = row[:-2].tolist() # Extract ailments as a list\n\n if study_name not in study_data:\n study_data[study_name] = {}\n\n study_data[study_name][taxon] = ailments\n\n for study in study_data:\n taxa_in_my_study = study_data[\"My Study\"].keys()\n for taxon in taxa_in_my_study:\n if taxon not in study_data[study]:\n study_data[study][taxon] = [0] * len(ailment_names)\n\n # Create a list to hold the transformed data\n transformed_data = []\n for study, study_values in study_data.items():\n for taxon, ailments in study_values.items():\n row_data = 
[study, taxon] + ailments\n transformed_data.append(row_data)\n\n # Create a DataFrame from the transformed data\n columns = ['study', 'taxon'] + ailment_names\n result_df = pd.DataFrame(transformed_data, columns=columns)\n\n return result_df\n\n def calculate_jaccard_similarity(self, study_column: str, taxon_column: str, ailment_columns: list[str],\n my_study: str) -> dict[tuple[str, str], float]:\n\n \"\"\"Calculates pairwise Jaccard similarity between 'My Study' and other studies based on ailments.\n\n Args:\n data (pd.DataFrame): The input DataFrame containing the dataset.\n study_column (str): Column name for the study identifier.\n taxon_column (str): Column name for the taxon information.\n ailment_columns (List[str]): List of ailment column names.\n my_study (str): Identifier for 'My Study'.\n\n Returns:\n Dict[Tuple[str, str], float]: Dictionary containing Jaccard similarities between 'My Study' and other studies.\n \"\"\"\n # Get unique studies\n studies = self.data[study_column].unique()\n\n # Create a dictionary to store Jaccard similarity between 'My Study' and other studies\n jaccard_similarities = []\n\n # Calculate Jaccard similarity for 'My Study' against other studies\n for other_study in studies:\n if other_study != my_study:\n subset1 = self.data[self.data[study_column] == my_study][ailment_columns]\n subset2 = self.data[self.data[study_column] == other_study][ailment_columns]\n\n # Flatten ailment columns for Jaccard similarity calculation\n subset1_flattened = subset1.values.flatten()\n subset2_flattened = subset2.values.flatten()\n\n # Calculate Jaccard similarity for ailment columns using sklearn's jaccard_score\n jaccard_sim = jaccard_score(subset1_flattened, subset2_flattened)\n\n jaccard_similarities.append({\"study\": other_study, \"similarity\": jaccard_sim})\n\n jaccard_similarities = sorted(jaccard_similarities, key=lambda x: x['similarity'], reverse=True)\n\n return pd.DataFrame(jaccard_similarities)\n\n def run_analysis(self, literature_column: str, taxon_column: str, use_column: str, my_study: str = \"My Study\"):\n self.data = self.data.dropna(subset=[literature_column])\n self.data = self.convert_data(literature_column, taxon_column, use_column)\n self.data['study'] = self.data.index.get_level_values(\"study\")\n self.data['taxon'] = self.data.index.get_level_values(taxon_column)\n self.data = self.fill_missing_taxa_dynamic()\n ailment_columns = self.data.columns[2:]\n return self.calculate_jaccard_similarity(study_column=\"study\", taxon_column=\"taxon\",\n ailment_columns=ailment_columns, my_study=my_study)"
},
{
"identifier": "ChordPlot",
"path": "pyethnobiology/visualization.py",
"snippet": "class ChordPlot:\n\n def __init__(\n\n self,\n data: pd.DataFrame,\n by: str = \"taxon\",\n informant_column: str = \"informant\",\n taxon_column: str = \"taxon\",\n use_column: str = \"ailments_treated\",\n colors: str = None,\n min_info_count: int = None,\n get_first: int = None\n ):\n\n \"\"\"\n Initialize a ChordPlot object for visualizing relationships between data elements.\n\n Args:\n data (pd.DataFrame): The data frame containing relevant information.\n by (str, optional): The column to group data by, defaults to \"informant\".\n informant_column (str, optional): The column name for informant data, defaults to \"informant\".\n taxon_column (str, optional): The column name for taxon data, defaults to \"taxon\".\n use_column (str, optional): The column name for additional data associated with each pair, defaults to \"ailments_treated\".\n colors (list, optional): A list of colors for the links in the plot.\n min_info_count (int, optional): The minimum information count to include in the plot.\n get_first (int, optional): The number of top entries to show in the plot.\n\n Returns:\n A ChordPlot object.\n\n \"\"\"\n\n self.data = data\n self.colors = colors\n self.by = by\n self.min_info_count = min_info_count\n self.get_first = get_first\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n\n def plot(self):\n\n \"\"\"\n Generate and display a circular chord plot using the prepared data.\n\n Returns:\n A Circos object containing the plot figure.\n\n Raises:\n Exception: If any error occurs during plot generation.\n \"\"\"\n\n # Prepare data for visualization\n matrix, order = self._prepare_data()\n\n # Create the Circos plot\n circos = self._create_plot(matrix, order)\n\n return circos.plotfig()\n\n def save_plot(self, filename: str, dpi: int = 300):\n\n \"\"\"\n Generate and save a circular chord plot using the prepared data.\n\n Args:\n filename (str): The name of the file to save the plot to.\n dpi (int, optional): The resolution of the plot, defaults to 300.\n\n Raises:\n Exception: If any error occurs during plot generation.\n \"\"\"\n\n # Prepare data for visualization\n matrix, order = self._prepare_data()\n\n # Create the Circos plot\n circos = self._create_plot(matrix, order)\n\n # Save the plot to a file\n circos.savefig(filename, dpi=dpi)\n\n\n def _prepare_data(self) -> pd.DataFrame:\n\n \"\"\"\n Prepare the data for generating the ChordPlot by counting occurrences and creating a matrix.\n\n Returns:\n A tuple containing:\n - matrix (pd.DataFrame): A data frame with informant counts for each pair.\n - order (list): A list of labels for the circular plot.\n \"\"\"\n\n if self.by == \"informant\":\n taxon_column = self.informant_column\n ailments_treated_column = self.use_column\n else:\n taxon_column = self.taxon_column\n ailments_treated_column = self.use_column\n\n informant_counts = (\n self.data.groupby([taxon_column, ailments_treated_column])\n .size()\n .reset_index(name=\"informant_count\")\n .sort_values(by=\"informant_count\", ascending=False)\n ) # Remove slicing for now\n\n # Apply filtering based on user preference\n if self.get_first is not None:\n informant_counts = informant_counts.head(self.get_first) # Limit by number of species\n elif self.min_info_count is not None:\n informant_counts = informant_counts[\n informant_counts[\"informant_count\"] >= self.min_info_count] # Limit by minimum count\n\n informant_counts = informant_counts.reset_index(drop=True)\n\n matrix_data = 
[[row[taxon_column], row[ailments_treated_column], row[\"informant_count\"]] for idx, row in\n informant_counts.iterrows()]\n matrix = Matrix.parse_fromto_table(pd.DataFrame(matrix_data))\n order = list(set(informant_counts[taxon_column].to_list())) + list(\n set(informant_counts[ailments_treated_column].to_list()))\n return matrix, order\n\n def _create_plot(self, matrix: pd.DataFrame, order: list) -> Circos:\n\n \"\"\"\n Create the Circos plot using the prepared data and configuration.\n\n Args:\n matrix (pd.DataFrame): The data frame with informant counts for each pair.\n order (list): The list of labels for the circular plot.\n\n Returns:\n A Circos object containing the plot figure.\n \"\"\"\n\n circos = Circos.initialize_from_matrix(\n matrix=matrix,\n space=3,\n r_lim=(97, 100),\n cmap=self.colors if self.colors else \"tab10\",\n label_kws=dict(size=9, orientation=\"vertical\"),\n link_kws=dict(ec=\"black\", lw=0.1),\n order=order,\n )\n return circos"
}
] | import pandas as pd
import rdata
from .indices import UR, CI, FC, NU, RFC, RI, UV, CV, FL, FIC
from .stats import Jaccard
from .visualization import ChordPlot | 9,727 |
class pyethnobiology:
"""
Encapsulates ethnobotanical data and analysis.
"""
def __init__(
self,
data: pd.DataFrame,
informant_column: str = "informant",
taxon_column: str = "taxon",
use_column: str = "ailments_treated",
literature_column: str = "literature",
convert_use_data: bool = False,
) -> None:
"""
Initializes the Ethnobotany class.
Args:
data: DataFrame containing ethnobotanical information.
informant_column: Name of the column containing informant IDs.
taxon_column: Name of the column containing species names.
use_column: Name of the column containing plant uses.
convert_use_data: Whether to convert use data format (optional).
"""
self.data = self.load_data(data, informant_column, taxon_column, use_column, convert_use_data)
self.informant_column = informant_column
self.taxon_column = taxon_column
self.use_column = use_column
self.literature_column = literature_column
def CI(self):
CI_class = CI(self.data, self.informant_column, self.taxon_column, self.use_column)
return CI_class
def FC(self):
FC_class = FC(self.data, self.informant_column, self.taxon_column, self.use_column)
return FC_class
def NU(self):
NU_class = NU(self.data, self.informant_column, self.taxon_column, self.use_column)
return NU_class
def UR(self):
UR_class = UR(self.data, self.informant_column, self.taxon_column, self.use_column)
return UR_class
def RFC(self):
RFC_class = RFC(self.data, self.informant_column, self.taxon_column, self.use_column)
return RFC_class
def RI(self):
RI_class = RI(self.data, self.informant_column, self.taxon_column, self.use_column)
return RI_class
def UV(self):
UV_class = UV(self.data, self.informant_column, self.taxon_column, self.use_column)
return UV_class
def CV(self):
CV_class = CV(self.data, self.informant_column, self.taxon_column, self.use_column)
return CV_class
|
class pyethnobiology:
"""
Encapsulates ethnobotanical data and analysis.
"""
def __init__(
self,
data: pd.DataFrame,
informant_column: str = "informant",
taxon_column: str = "taxon",
use_column: str = "ailments_treated",
literature_column: str = "literature",
convert_use_data: bool = False,
) -> None:
"""
Initializes the Ethnobotany class.
Args:
data: DataFrame containing ethnobotanical information.
informant_column: Name of the column containing informant IDs.
taxon_column: Name of the column containing species names.
use_column: Name of the column containing plant uses.
convert_use_data: Whether to convert use data format (optional).
"""
self.data = self.load_data(data, informant_column, taxon_column, use_column, convert_use_data)
self.informant_column = informant_column
self.taxon_column = taxon_column
self.use_column = use_column
self.literature_column = literature_column
def CI(self):
CI_class = CI(self.data, self.informant_column, self.taxon_column, self.use_column)
return CI_class
def FC(self):
FC_class = FC(self.data, self.informant_column, self.taxon_column, self.use_column)
return FC_class
def NU(self):
NU_class = NU(self.data, self.informant_column, self.taxon_column, self.use_column)
return NU_class
def UR(self):
UR_class = UR(self.data, self.informant_column, self.taxon_column, self.use_column)
return UR_class
def RFC(self):
RFC_class = RFC(self.data, self.informant_column, self.taxon_column, self.use_column)
return RFC_class
def RI(self):
RI_class = RI(self.data, self.informant_column, self.taxon_column, self.use_column)
return RI_class
def UV(self):
UV_class = UV(self.data, self.informant_column, self.taxon_column, self.use_column)
return UV_class
def CV(self):
CV_class = CV(self.data, self.informant_column, self.taxon_column, self.use_column)
return CV_class
| def FL(self): | 8 | 2023-12-25 01:06:51+00:00 | 12k |
JiePKU/MoLE | train_textual_inversion_XTI.py | [
{
"identifier": "ConfigSanitizer",
"path": "library/config_util.py",
"snippet": "class ConfigSanitizer:\n # @curry\n @staticmethod\n def __validate_and_convert_twodim(klass, value: Sequence) -> Tuple:\n Schema(ExactSequence([klass, klass]))(value)\n return tuple(value)\n\n # @curry\n @staticmethod\n def __validate_and_convert_scalar_or_twodim(klass, value: Union[float, Sequence]) -> Tuple:\n Schema(Any(klass, ExactSequence([klass, klass])))(value)\n try:\n Schema(klass)(value)\n return (value, value)\n except:\n return ConfigSanitizer.__validate_and_convert_twodim(klass, value)\n\n # subset schema\n SUBSET_ASCENDABLE_SCHEMA = {\n \"color_aug\": bool,\n \"face_crop_aug_range\": functools.partial(__validate_and_convert_twodim.__func__, float),\n \"flip_aug\": bool,\n \"num_repeats\": int,\n \"random_crop\": bool,\n \"shuffle_caption\": bool,\n \"keep_tokens\": int,\n \"token_warmup_min\": int,\n \"token_warmup_step\": Any(float,int),\n }\n # DO means DropOut\n DO_SUBSET_ASCENDABLE_SCHEMA = {\n \"caption_dropout_every_n_epochs\": int,\n \"caption_dropout_rate\": Any(float, int),\n \"caption_tag_dropout_rate\": Any(float, int),\n }\n # DB means DreamBooth\n DB_SUBSET_ASCENDABLE_SCHEMA = {\n \"caption_extension\": str,\n \"class_tokens\": str,\n }\n DB_SUBSET_DISTINCT_SCHEMA = {\n Required(\"image_dir\"): str,\n \"is_reg\": bool,\n }\n # FT means FineTuning\n FT_SUBSET_DISTINCT_SCHEMA = {\n Required(\"metadata_file\"): str,\n \"image_dir\": str,\n }\n\n # datasets schema\n DATASET_ASCENDABLE_SCHEMA = {\n \"batch_size\": int,\n \"bucket_no_upscale\": bool,\n \"bucket_reso_steps\": int,\n \"enable_bucket\": bool,\n \"max_bucket_reso\": int,\n \"min_bucket_reso\": int,\n \"resolution\": functools.partial(__validate_and_convert_scalar_or_twodim.__func__, int),\n }\n\n # options handled by argparse but not handled by user config\n ARGPARSE_SPECIFIC_SCHEMA = {\n \"debug_dataset\": bool,\n \"max_token_length\": Any(None, int),\n \"prior_loss_weight\": Any(float, int),\n }\n # for handling default None value of argparse\n ARGPARSE_NULLABLE_OPTNAMES = [\n \"face_crop_aug_range\",\n \"resolution\",\n ]\n # prepare map because option name may differ among argparse and user config\n ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME = {\n \"train_batch_size\": \"batch_size\",\n \"dataset_repeats\": \"num_repeats\",\n }\n\n def __init__(self, support_dreambooth: bool, support_finetuning: bool, support_dropout: bool) -> None:\n assert support_dreambooth or support_finetuning, \"Neither DreamBooth mode nor fine tuning mode specified. Please specify one mode or more. 
/ DreamBooth モードか fine tuning モードのどちらも指定されていません。1つ以上指定してください。\"\n\n self.db_subset_schema = self.__merge_dict(\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DB_SUBSET_DISTINCT_SCHEMA,\n self.DB_SUBSET_ASCENDABLE_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n )\n\n self.ft_subset_schema = self.__merge_dict(\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.FT_SUBSET_DISTINCT_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n )\n\n self.db_dataset_schema = self.__merge_dict(\n self.DATASET_ASCENDABLE_SCHEMA,\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DB_SUBSET_ASCENDABLE_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n {\"subsets\": [self.db_subset_schema]},\n )\n\n self.ft_dataset_schema = self.__merge_dict(\n self.DATASET_ASCENDABLE_SCHEMA,\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n {\"subsets\": [self.ft_subset_schema]},\n )\n\n if support_dreambooth and support_finetuning:\n def validate_flex_dataset(dataset_config: dict):\n subsets_config = dataset_config.get(\"subsets\", [])\n\n # check dataset meets FT style\n # NOTE: all FT subsets should have \"metadata_file\"\n if all([\"metadata_file\" in subset for subset in subsets_config]):\n return Schema(self.ft_dataset_schema)(dataset_config)\n # check dataset meets DB style\n # NOTE: all DB subsets should have no \"metadata_file\"\n elif all([\"metadata_file\" not in subset for subset in subsets_config]):\n return Schema(self.db_dataset_schema)(dataset_config)\n else:\n raise voluptuous.Invalid(\"DreamBooth subset and fine tuning subset cannot be mixed in the same dataset. Please split them into separate datasets. / DreamBoothのサブセットとfine tuninのサブセットを同一のデータセットに混在させることはできません。別々のデータセットに分割してください。\")\n\n self.dataset_schema = validate_flex_dataset\n elif support_dreambooth:\n self.dataset_schema = self.db_dataset_schema\n else:\n self.dataset_schema = self.ft_dataset_schema\n\n self.general_schema = self.__merge_dict(\n self.DATASET_ASCENDABLE_SCHEMA,\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DB_SUBSET_ASCENDABLE_SCHEMA if support_dreambooth else {},\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n )\n\n self.user_config_validator = Schema({\n \"general\": self.general_schema,\n \"datasets\": [self.dataset_schema],\n })\n\n self.argparse_schema = self.__merge_dict(\n self.general_schema,\n self.ARGPARSE_SPECIFIC_SCHEMA,\n {optname: Any(None, self.general_schema[optname]) for optname in self.ARGPARSE_NULLABLE_OPTNAMES},\n {a_name: self.general_schema[c_name] for a_name, c_name in self.ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME.items()},\n )\n\n self.argparse_config_validator = Schema(Object(self.argparse_schema), extra=voluptuous.ALLOW_EXTRA)\n\n def sanitize_user_config(self, user_config: dict) -> dict:\n try:\n return self.user_config_validator(user_config)\n except MultipleInvalid:\n # TODO: エラー発生時のメッセージをわかりやすくする\n print(\"Invalid user config / ユーザ設定の形式が正しくないようです\")\n raise\n\n # NOTE: In nature, argument parser result is not needed to be sanitize\n # However this will help us to detect program bug\n def sanitize_argparse_namespace(self, argparse_namespace: argparse.Namespace) -> argparse.Namespace:\n try:\n return self.argparse_config_validator(argparse_namespace)\n except MultipleInvalid:\n # XXX: this should be a bug\n print(\"Invalid cmdline parsed arguments. This should be a bug. 
/ コマンドラインのパース結果が正しくないようです。プログラムのバグの可能性が高いです。\")\n raise\n\n # NOTE: value would be overwritten by latter dict if there is already the same key\n @staticmethod\n def __merge_dict(*dict_list: dict) -> dict:\n merged = {}\n for schema in dict_list:\n # merged |= schema\n for k, v in schema.items():\n merged[k] = v\n return merged"
},
{
"identifier": "BlueprintGenerator",
"path": "library/config_util.py",
"snippet": "class BlueprintGenerator:\n BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME = {\n }\n\n def __init__(self, sanitizer: ConfigSanitizer):\n self.sanitizer = sanitizer\n\n # runtime_params is for parameters which is only configurable on runtime, such as tokenizer\n def generate(self, user_config: dict, argparse_namespace: argparse.Namespace, **runtime_params) -> Blueprint:\n sanitized_user_config = self.sanitizer.sanitize_user_config(user_config)\n sanitized_argparse_namespace = self.sanitizer.sanitize_argparse_namespace(argparse_namespace)\n\n # convert argparse namespace to dict like config\n # NOTE: it is ok to have extra entries in dict\n optname_map = self.sanitizer.ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME\n argparse_config = {optname_map.get(optname, optname): value for optname, value in vars(sanitized_argparse_namespace).items()}\n\n general_config = sanitized_user_config.get(\"general\", {})\n\n dataset_blueprints = []\n for dataset_config in sanitized_user_config.get(\"datasets\", []):\n # NOTE: if subsets have no \"metadata_file\", these are DreamBooth datasets/subsets\n subsets = dataset_config.get(\"subsets\", [])\n is_dreambooth = all([\"metadata_file\" not in subset for subset in subsets])\n if is_dreambooth:\n subset_params_klass = DreamBoothSubsetParams\n dataset_params_klass = DreamBoothDatasetParams\n else:\n subset_params_klass = FineTuningSubsetParams\n dataset_params_klass = FineTuningDatasetParams\n\n subset_blueprints = []\n for subset_config in subsets:\n params = self.generate_params_by_fallbacks(subset_params_klass,\n [subset_config, dataset_config, general_config, argparse_config, runtime_params])\n subset_blueprints.append(SubsetBlueprint(params))\n\n params = self.generate_params_by_fallbacks(dataset_params_klass,\n [dataset_config, general_config, argparse_config, runtime_params])\n dataset_blueprints.append(DatasetBlueprint(is_dreambooth, params, subset_blueprints))\n\n dataset_group_blueprint = DatasetGroupBlueprint(dataset_blueprints)\n\n return Blueprint(dataset_group_blueprint)\n\n @staticmethod\n def generate_params_by_fallbacks(param_klass, fallbacks: Sequence[dict]):\n name_map = BlueprintGenerator.BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME\n search_value = BlueprintGenerator.search_value\n default_params = asdict(param_klass())\n param_names = default_params.keys()\n\n params = {name: search_value(name_map.get(name, name), fallbacks, default_params.get(name)) for name in param_names}\n\n return param_klass(**params)\n\n @staticmethod\n def search_value(key: str, fallbacks: Sequence[dict], default_value = None):\n for cand in fallbacks:\n value = cand.get(key)\n if value is not None:\n return value\n\n return default_value"
},
{
"identifier": "apply_snr_weight",
"path": "library/custom_train_functions.py",
"snippet": "def apply_snr_weight(loss, timesteps, noise_scheduler, gamma):\r\n snr = torch.stack([noise_scheduler.all_snr[t] for t in timesteps])\r\n gamma_over_snr = torch.div(torch.ones_like(snr) * gamma, snr)\r\n snr_weight = torch.minimum(gamma_over_snr, torch.ones_like(gamma_over_snr)).float() # from paper\r\n loss = loss * snr_weight\r\n return loss\r"
},
{
"identifier": "prepare_scheduler_for_custom_training",
"path": "library/custom_train_functions.py",
"snippet": "def prepare_scheduler_for_custom_training(noise_scheduler, device):\r\n if hasattr(noise_scheduler, \"all_snr\"):\r\n return\r\n\r\n alphas_cumprod = noise_scheduler.alphas_cumprod\r\n sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod)\r\n sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod)\r\n alpha = sqrt_alphas_cumprod\r\n sigma = sqrt_one_minus_alphas_cumprod\r\n all_snr = (alpha / sigma) ** 2\r\n\r\n noise_scheduler.all_snr = all_snr.to(device)\r"
},
{
"identifier": "pyramid_noise_like",
"path": "library/custom_train_functions.py",
"snippet": "def pyramid_noise_like(noise, device, iterations=6, discount=0.4):\r\n b, c, w, h = noise.shape # EDIT: w and h get over-written, rename for a different variant!\r\n u = torch.nn.Upsample(size=(w, h), mode=\"bilinear\").to(device)\r\n for i in range(iterations):\r\n r = random.random() * 2 + 2 # Rather than always going 2x,\r\n wn, hn = max(1, int(w / (r**i))), max(1, int(h / (r**i)))\r\n noise += u(torch.randn(b, c, wn, hn).to(device)) * discount**i\r\n if wn == 1 or hn == 1:\r\n break # Lowest resolution is 1x1\r\n return noise / noise.std() # Scaled back to roughly unit variance\r"
},
{
"identifier": "apply_noise_offset",
"path": "library/custom_train_functions.py",
"snippet": "def apply_noise_offset(latents, noise, noise_offset, adaptive_noise_scale):\r\n if noise_offset is None:\r\n return noise\r\n if adaptive_noise_scale is not None:\r\n # latent shape: (batch_size, channels, height, width)\r\n # abs mean value for each channel\r\n latent_mean = torch.abs(latents.mean(dim=(2, 3), keepdim=True))\r\n\r\n # multiply adaptive noise scale to the mean value and add it to the noise offset\r\n noise_offset = noise_offset + adaptive_noise_scale * latent_mean\r\n noise_offset = torch.clamp(noise_offset, 0.0, None) # in case of adaptive noise scale is negative\r\n\r\n noise = noise + noise_offset * torch.randn((latents.shape[0], latents.shape[1], 1, 1), device=latents.device)\r\n return noise\r"
},
{
"identifier": "scale_v_prediction_loss_like_noise_prediction",
"path": "library/custom_train_functions.py",
"snippet": "def scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler):\r\n snr_t = torch.stack([noise_scheduler.all_snr[t] for t in timesteps]) # batch_size\r\n snr_t = torch.minimum(snr_t, torch.ones_like(snr_t) * 1000) # if timestep is 0, snr_t is inf, so limit it to 1000\r\n scale = snr_t / (snr_t + 1)\r\n\r\n loss = loss * scale\r\n return loss\r"
},
{
"identifier": "unet_forward_XTI",
"path": "XTI_hijack.py",
"snippet": "def unet_forward_XTI(self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet2DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # 0. center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.config.num_class_embeds is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 3. 
down\n down_block_res_samples = (sample,)\n down_i = 0\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states[down_i:down_i+2],\n )\n down_i += 2\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states[6])\n\n # 5. up\n up_i = 7\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states[up_i:up_i+3],\n upsample_size=upsample_size,\n )\n up_i += 3\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size\n )\n # 6. post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet2DConditionOutput(sample=sample)"
},
{
"identifier": "downblock_forward_XTI",
"path": "XTI_hijack.py",
"snippet": "def downblock_forward_XTI(\n self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, cross_attention_kwargs=None\n):\n output_states = ()\n i = 0\n\n for resnet, attn in zip(self.resnets, self.attentions):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states[i]\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states[i]).sample\n\n output_states += (hidden_states,)\n i += 1\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "upblock_forward_XTI",
"path": "XTI_hijack.py",
"snippet": "def upblock_forward_XTI(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n):\n i = 0\n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states[i]\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states[i]).sample\n \n i += 1\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
}
] | import importlib
import argparse
import gc
import math
import os
import toml
import torch
import diffusers
import library.train_util as train_util
import library.huggingface_util as huggingface_util
import library.config_util as config_util
import library.custom_train_functions as custom_train_functions
from multiprocessing import Value
from tqdm import tqdm
from accelerate.utils import set_seed
from diffusers import DDPMScheduler
from library.config_util import (
ConfigSanitizer,
BlueprintGenerator,
)
from library.custom_train_functions import (
apply_snr_weight,
prepare_scheduler_for_custom_training,
pyramid_noise_like,
apply_noise_offset,
scale_v_prediction_loss_like_noise_prediction,
)
from XTI_hijack import unet_forward_XTI, downblock_forward_XTI, upblock_forward_XTI
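# XTI_hijack provides patched U-Net / attention-block forward methods that accept one text
# embedding per cross-attention layer instead of a single shared conditioning tensor.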
from safetensors.torch import save_file
from safetensors.torch import load_file | 9,001 | vae.to(accelerator.device, dtype=weight_dtype)
    # Experimental feature: perform fp16 training including gradients; patch PyTorch to enable grad scaling in fp16
if args.full_fp16:
train_util.patch_accelerator_for_fp16_training(accelerator)
text_encoder.to(weight_dtype)
    # resume training if a saved state is specified
train_util.resume_from_local_or_hf_if_specified(accelerator, args)
    # calculate the number of epochs
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
    # start training
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
print("running training / 学習開始")
print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
print(f" num epochs / epoch数: {num_train_epochs}")
print(f" batch size per device / バッチサイズ: {args.train_batch_size}")
print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
    print(f"  gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")
progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
global_step = 0
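    # DDPM scheduler defines the forward diffusion process; prepare_scheduler_for_custom_training
    # precomputes per-timestep SNR values used by Min-SNR weighting and v-prediction loss scaling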
noise_scheduler = DDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
)
prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
if accelerator.is_main_process:
accelerator.init_trackers("textual_inversion" if args.log_tracker_name is None else args.log_tracker_name)
# function for saving/removing
def save_model(ckpt_name, embs, steps, epoch_no, force_sync_upload=False):
os.makedirs(args.output_dir, exist_ok=True)
ckpt_file = os.path.join(args.output_dir, ckpt_name)
print(f"\nsaving checkpoint: {ckpt_file}")
save_weights(ckpt_file, embs, save_dtype)
if args.huggingface_repo_id is not None:
huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)
def remove_model(old_ckpt_name):
old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name)
if os.path.exists(old_ckpt_file):
print(f"removing old checkpoint: {old_ckpt_file}")
os.remove(old_ckpt_file)
# training loop
for epoch in range(num_train_epochs):
print(f"\nepoch {epoch+1}/{num_train_epochs}")
current_epoch.value = epoch + 1
text_encoder.train()
loss_total = 0
for step, batch in enumerate(train_dataloader):
current_step.value = global_step
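            # gradient accumulation context over the text encoder, which holds the trainable token embeddings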
with accelerator.accumulate(text_encoder):
with torch.no_grad():
if "latents" in batch and batch["latents"] is not None:
latents = batch["latents"].to(accelerator.device)
else:
                        # encode images into latents with the VAE
latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
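                        # scale by the Stable Diffusion VAE latent scaling factor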
latents = latents * 0.18215
b_size = latents.shape[0]
# Get the text embedding for conditioning
input_ids = batch["input_ids"].to(accelerator.device)
                # use float instead of fp16/bf16 here because the text encoder is kept in float
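                # input_ids carries the per-layer token variants; splitting along dim=1 and stacking yields
                # one set of text embeddings per XTI layer, consumed positionally by unet_forward_XTI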
encoder_hidden_states = torch.stack(
[
train_util.get_hidden_states(args, s, tokenizer, text_encoder, weight_dtype)
for s in torch.split(input_ids, 1, dim=1)
]
)
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents, device=latents.device)
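                # optional noise perturbations: noise offset shifts the noise mean per sample/channel,
                # while multires (pyramid) noise adds progressively lower-resolution noise octaves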
if args.noise_offset:
noise = apply_noise_offset(latents, noise, args.noise_offset, args.adaptive_noise_scale)
elif args.multires_noise_iterations:
noise = pyramid_noise_like(noise, latents.device, args.multires_noise_iterations, args.multires_noise_discount)
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Predict the noise residual
with accelerator.autocast():
noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states=encoder_hidden_states).sample
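                # regression target: velocity for v-parameterization models, otherwise the added noise (epsilon)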
if args.v_parameterization:
# v-parameterization training
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
target = noise
loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction="none")
loss = loss.mean([1, 2, 3])
                loss_weights = batch["loss_weights"]  # per-sample loss weights
loss = loss * loss_weights
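                # Min-SNR-gamma weighting multiplies the per-sample loss by min(gamma / SNR(t), 1)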
if args.min_snr_gamma:
loss = apply_snr_weight(loss, timesteps, noise_scheduler, args.min_snr_gamma)
if args.scale_v_pred_loss_like_noise_pred:
|
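# Prompt templates for object and style training; one of these lists is used when
# --use_object_template or --use_style_template is specified (see use_template in train()).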
imagenet_templates_small = [
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of a dirty {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
]
imagenet_style_templates_small = [
"a painting in the style of {}",
"a rendering in the style of {}",
"a cropped painting in the style of {}",
"the painting in the style of {}",
"a clean painting in the style of {}",
"a dirty painting in the style of {}",
"a dark painting in the style of {}",
"a picture in the style of {}",
"a cool painting in the style of {}",
"a close-up painting in the style of {}",
"a bright painting in the style of {}",
"a cropped painting in the style of {}",
"a good painting in the style of {}",
"a close-up painting in the style of {}",
"a rendition in the style of {}",
"a nice painting in the style of {}",
"a small painting in the style of {}",
"a weird painting in the style of {}",
"a large painting in the style of {}",
]
def train(args):
if args.output_name is None:
args.output_name = args.token_string
use_template = args.use_object_template or args.use_style_template
train_util.verify_training_args(args)
train_util.prepare_dataset_args(args, True)
if args.sample_every_n_steps is not None or args.sample_every_n_epochs is not None:
print(
"sample_every_n_steps and sample_every_n_epochs are not supported in this script currently / sample_every_n_stepsとsample_every_n_epochsは現在このスクリプトではサポートされていません"
)
assert (
args.dataset_class is None
), "dataset_class is not supported in this script currently / dataset_classは現在このスクリプトではサポートされていません"
cache_latents = args.cache_latents
if args.seed is not None:
set_seed(args.seed)
tokenizer = train_util.load_tokenizer(args)
    # prepare the accelerator
print("prepare accelerator")
accelerator, unwrap_model = train_util.prepare_accelerator(args)
    # prepare dtypes for mixed precision and cast as appropriate
weight_dtype, save_dtype = train_util.prepare_dtype(args)
    # load the models
text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype, accelerator)
# Convert the init_word to token_id
if args.init_word is not None:
init_token_ids = tokenizer.encode(args.init_word, add_special_tokens=False)
if len(init_token_ids) > 1 and len(init_token_ids) != args.num_vectors_per_token:
print(
f"token length for init words is not same to num_vectors_per_token, init words is repeated or truncated / 初期化単語のトークン長がnum_vectors_per_tokenと合わないため、繰り返しまたは切り捨てが発生します: length {len(init_token_ids)}"
)
else:
init_token_ids = None
# add new word to tokenizer, count is num_vectors_per_token
token_strings = [args.token_string] + [f"{args.token_string}{i+1}" for i in range(args.num_vectors_per_token - 1)]
num_added_tokens = tokenizer.add_tokens(token_strings)
assert (
num_added_tokens == args.num_vectors_per_token
), f"tokenizer has same word to token string. please use another one / 指定したargs.token_stringは既に存在します。別の単語を使ってください: {args.token_string}"
token_ids = tokenizer.convert_tokens_to_ids(token_strings)
print(f"tokens are added: {token_ids}")
assert min(token_ids) == token_ids[0] and token_ids[-1] == token_ids[0] + len(token_ids) - 1, "token ids are not ordered"
assert len(tokenizer) - 1 == token_ids[-1], f"token ids do not end at the end of the tokenizer vocabulary: {len(tokenizer)}"
token_strings_XTI = []
XTI_layers = [
"IN01",
"IN02",
"IN04",
"IN05",
"IN07",
"IN08",
"MID",
"OUT03",
"OUT04",
"OUT05",
"OUT06",
"OUT07",
"OUT08",
"OUT09",
"OUT10",
"OUT11",
]
for layer_name in XTI_layers:
token_strings_XTI += [f"{t}_{layer_name}" for t in token_strings]
tokenizer.add_tokens(token_strings_XTI)
token_ids_XTI = tokenizer.convert_tokens_to_ids(token_strings_XTI)
print(f"tokens are added (XTI): {token_ids_XTI}")
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
if init_token_ids is not None:
for i, token_id in enumerate(token_ids_XTI):
token_embeds[token_id] = token_embeds[init_token_ids[(i // 16) % len(init_token_ids)]]
# print(token_id, token_embeds[token_id].mean(), token_embeds[token_id].min())
# load weights
if args.weights is not None:
embeddings = load_weights(args.weights)
assert len(token_ids) == len(
embeddings
), f"num_vectors_per_token is mismatch for weights / 指定した重みとnum_vectors_per_tokenの値が異なります: {len(embeddings)}"
# print(token_ids, embeddings.size())
for token_id, embedding in zip(token_ids_XTI, embeddings):
token_embeds[token_id] = embedding
# print(token_id, token_embeds[token_id].mean(), token_embeds[token_id].min())
print(f"weighs loaded")
print(f"create embeddings for {args.num_vectors_per_token} tokens, for {args.token_string}")
# prepare the dataset
blueprint_generator = BlueprintGenerator(ConfigSanitizer(True, True, False))
if args.dataset_config is not None:
print(f"Load dataset config from {args.dataset_config}")
user_config = config_util.load_user_config(args.dataset_config)
ignored = ["train_data_dir", "reg_data_dir", "in_json"]
if any(getattr(args, attr) is not None for attr in ignored):
print(
"ignore following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
", ".join(ignored)
)
)
else:
use_dreambooth_method = args.in_json is None
if use_dreambooth_method:
print("Use DreamBooth method.")
user_config = {
"datasets": [
{"subsets": config_util.generate_dreambooth_subsets_config_by_subdirs(args.train_data_dir, args.reg_data_dir)}
]
}
else:
print("Train with captions.")
user_config = {
"datasets": [
{
"subsets": [
{
"image_dir": args.train_data_dir,
"metadata_file": args.in_json,
}
]
}
]
}
blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer)
train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
train_dataset_group.enable_XTI(XTI_layers, token_strings=token_strings)
current_epoch = Value("i", 0)
current_step = Value("i", 0)
ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
# make captions: extremely rough implementation that rewrites captions to the string "tokenstring tokenstring1 tokenstring2 ... tokenstringN"
if use_template:
print("use template for training captions. is object: {args.use_object_template}")
templates = imagenet_templates_small if args.use_object_template else imagenet_style_templates_small
replace_to = " ".join(token_strings)
captions = []
for tmpl in templates:
captions.append(tmpl.format(replace_to))
train_dataset_group.add_replacement("", captions)
if args.num_vectors_per_token > 1:
prompt_replacement = (args.token_string, replace_to)
else:
prompt_replacement = None
else:
if args.num_vectors_per_token > 1:
replace_to = " ".join(token_strings)
train_dataset_group.add_replacement(args.token_string, replace_to)
prompt_replacement = (args.token_string, replace_to)
else:
prompt_replacement = None
if args.debug_dataset:
train_util.debug_dataset(train_dataset_group, show_input_ids=True)
return
if len(train_dataset_group) == 0:
print("No data found. Please verify arguments / 画像がありません。引数指定を確認してください")
return
if cache_latents:
assert (
train_dataset_group.is_latent_cacheable()
), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
# incorporate xformers or memory-efficient attention into the model
train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
diffusers.models.UNet2DConditionModel.forward = unet_forward_XTI
diffusers.models.unet_2d_blocks.CrossAttnDownBlock2D.forward = downblock_forward_XTI
diffusers.models.unet_2d_blocks.CrossAttnUpBlock2D.forward = upblock_forward_XTI
# prepare for training
if cache_latents:
vae.to(accelerator.device, dtype=weight_dtype)
vae.requires_grad_(False)
vae.eval()
with torch.no_grad():
train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_latents_to_disk, accelerator.is_main_process)
vae.to("cpu")
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
accelerator.wait_for_everyone()
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
text_encoder.gradient_checkpointing_enable()
# prepare the classes needed for training
print("prepare optimizer, data loader etc.")
trainable_params = text_encoder.get_input_embeddings().parameters()
_, _, optimizer = train_util.get_optimizer(args, trainable_params)
# prepare the dataloader
# number of DataLoader worker processes: 0 means the main process
n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1) # cpu_count - 1, but at most the specified number
train_dataloader = torch.utils.data.DataLoader(
train_dataset_group,
batch_size=1,
shuffle=True,
collate_fn=collater,
num_workers=n_workers,
persistent_workers=args.persistent_data_loader_workers,
)
# calculate the number of training steps
if args.max_train_epochs is not None:
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
)
print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
# also send the number of training steps to the dataset side
train_dataset_group.set_max_train_steps(args.max_train_steps)
# prepare the lr scheduler
lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)
# the accelerator apparently takes care of the rest for us
text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
text_encoder, optimizer, train_dataloader, lr_scheduler
)
# transform DDP after prepare
text_encoder, unet = train_util.transform_if_model_is_DDP(text_encoder, unet)
index_no_updates = torch.arange(len(tokenizer)) < token_ids_XTI[0]
# print(len(index_no_updates), torch.sum(index_no_updates))
orig_embeds_params = unwrap_model(text_encoder).get_input_embeddings().weight.data.detach().clone()
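# Sketch (assumption, standard Textual Inversion practice; the actual restore is
# performed later in the training loop): after each optimizer step, every
# embedding row except the newly added tokens is reset from orig_embeds_params,
# so only the new token vectors are effectively trained:
#   with torch.no_grad():
#       unwrap_model(text_encoder).get_input_embeddings().weight[
#           index_no_updates
#       ] = orig_embeds_params[index_no_updates]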
# Freeze all parameters except for the token embeddings in text encoder
text_encoder.requires_grad_(True)
text_encoder.text_model.encoder.requires_grad_(False)
text_encoder.text_model.final_layer_norm.requires_grad_(False)
text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
# text_encoder.text_model.embeddings.token_embedding.requires_grad_(True)
unet.requires_grad_(False)
unet.to(accelerator.device, dtype=weight_dtype)
if args.gradient_checkpointing: # according to TI example in Diffusers, train is required
unet.train()
else:
unet.eval()
if not cache_latents:
vae.requires_grad_(False)
vae.eval()
vae.to(accelerator.device, dtype=weight_dtype)
# experimental feature: perform fp16 training including gradients; patch PyTorch to enable grad scaling in fp16
if args.full_fp16:
train_util.patch_accelerator_for_fp16_training(accelerator)
text_encoder.to(weight_dtype)
# resume training if specified
train_util.resume_from_local_or_hf_if_specified(accelerator, args)
# calculate the number of epochs
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
# train
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
print("running training / 学習開始")
print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
print(f" num epochs / epoch数: {num_train_epochs}")
print(f" batch size per device / バッチサイズ: {args.train_batch_size}")
print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
print(f" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")
progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
global_step = 0
noise_scheduler = DDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
)
prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
if accelerator.is_main_process:
accelerator.init_trackers("textual_inversion" if args.log_tracker_name is None else args.log_tracker_name)
# function for saving/removing
def save_model(ckpt_name, embs, steps, epoch_no, force_sync_upload=False):
os.makedirs(args.output_dir, exist_ok=True)
ckpt_file = os.path.join(args.output_dir, ckpt_name)
print(f"\nsaving checkpoint: {ckpt_file}")
save_weights(ckpt_file, embs, save_dtype)
if args.huggingface_repo_id is not None:
huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)
def remove_model(old_ckpt_name):
old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name)
if os.path.exists(old_ckpt_file):
print(f"removing old checkpoint: {old_ckpt_file}")
os.remove(old_ckpt_file)
# training loop
for epoch in range(num_train_epochs):
print(f"\nepoch {epoch+1}/{num_train_epochs}")
current_epoch.value = epoch + 1
text_encoder.train()
loss_total = 0
for step, batch in enumerate(train_dataloader):
current_step.value = global_step
with accelerator.accumulate(text_encoder):
with torch.no_grad():
if "latents" in batch and batch["latents"] is not None:
latents = batch["latents"].to(accelerator.device)
else:
# convert to latents
latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
b_size = latents.shape[0]
# Get the text embedding for conditioning
input_ids = batch["input_ids"].to(accelerator.device)
# use float instead of fp16/bf16 (weight_dtype) because the text encoder is kept in float
encoder_hidden_states = torch.stack(
[
train_util.get_hidden_states(args, s, tokenizer, text_encoder, weight_dtype)
for s in torch.split(input_ids, 1, dim=1)
]
)
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents, device=latents.device)
if args.noise_offset:
noise = apply_noise_offset(latents, noise, args.noise_offset, args.adaptive_noise_scale)
elif args.multires_noise_iterations:
noise = pyramid_noise_like(noise, latents.device, args.multires_noise_iterations, args.multires_noise_discount)
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Predict the noise residual
with accelerator.autocast():
noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states=encoder_hidden_states).sample
if args.v_parameterization:
# v-parameterization training
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
target = noise
loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction="none")
loss = loss.mean([1, 2, 3])
loss_weights = batch["loss_weights"] # per-sample weight
loss = loss * loss_weights
if args.min_snr_gamma:
loss = apply_snr_weight(loss, timesteps, noise_scheduler, args.min_snr_gamma)
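# Sketch (assumption): apply_snr_weight presumably implements Min-SNR-gamma
# weighting, scaling the per-sample loss by min(SNR(t), gamma) / SNR(t), with
# SNR computed from the scheduler's alphas_cumprod:
#   alphas_cumprod = noise_scheduler.alphas_cumprod.to(loss.device)[timesteps]
#   snr = alphas_cumprod / (1.0 - alphas_cumprod)
#   loss = loss * torch.minimum(snr, torch.full_like(snr, args.min_snr_gamma)) / snr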
if args.scale_v_pred_loss_like_noise_pred: | loss = scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler) | 6 | 2023-12-30 07:46:35+00:00 | 12k |
Hatins/DEOE | modules/detection.py | [
{
"identifier": "ObjectLabels",
"path": "data/genx_utils/labels.py",
"snippet": "class ObjectLabels(ObjectLabelBase):\n def __init__(self,\n object_labels: th.Tensor,\n input_size_hw: Tuple[int, int]):\n super().__init__(object_labels=object_labels, input_size_hw=input_size_hw)\n\n def __len__(self) -> int:\n return self.object_labels.shape[0]\n\n def rotate_(self, angle_deg: float):\n if len(self) == 0:\n return\n # (x0,y0)---(x1,y0) p00---p10\n # | | | |\n # | | | |\n # (x0,y1)---(x1,y1) p01---p11\n p00 = th.stack((self.x, self.y), dim=1)\n p10 = th.stack((self.x + self.w, self.y), dim=1)\n p01 = th.stack((self.x, self.y + self.h), dim=1)\n p11 = th.stack((self.x + self.w, self.y + self.h), dim=1)\n # points: 4 x N x 2\n points = th.stack((p00, p10, p01, p11), dim=0)\n\n cx = self._input_size_hw[1] // 2\n cy = self._input_size_hw[0] // 2\n center = th.tensor([cx, cy], device=self.device)\n\n angle_rad = angle_deg / 180 * math.pi\n # counter-clockwise rotation\n rot_matrix = th.tensor([[math.cos(angle_rad), math.sin(angle_rad)],\n [-math.sin(angle_rad), math.cos(angle_rad)]], device=self.device)\n\n points = points - center\n points = th.einsum('ij,pnj->pni', rot_matrix, points)\n points = points + center\n\n height, width = self.input_size_hw\n x0 = th.clamp(th.min(points[..., 0], dim=0)[0], min=0, max=width - 1)\n y0 = th.clamp(th.min(points[..., 1], dim=0)[0], min=0, max=height - 1)\n x1 = th.clamp(th.max(points[..., 0], dim=0)[0], min=0, max=width - 1)\n y1 = th.clamp(th.max(points[..., 1], dim=0)[0], min=0, max=height - 1)\n\n self.x = x0\n self.y = y0\n self.w = x1 - x0\n self.h = y1 - y0\n\n self.remove_flat_labels_()\n\n assert th.all(self.x >= 0)\n assert th.all(self.y >= 0)\n assert th.all(self.x + self.w <= self.input_size_hw[1] - 1)\n assert th.all(self.y + self.h <= self.input_size_hw[0] - 1)\n\n def zoom_in_and_rescale_(self, zoom_coordinates_x0y0: Tuple[int, int], zoom_in_factor: float):\n \"\"\"\n 1) Computes a new smaller canvas size: original canvas scaled by a factor of 1/zoom_in_factor (downscaling)\n 2) Places the smaller canvas inside the original canvas at the top-left coordinates zoom_coordinates_x0y0\n 3) Extract the smaller canvas and rescale it back to the original resolution\n \"\"\"\n if len(self) == 0:\n return\n assert len(zoom_coordinates_x0y0) == 2\n assert zoom_in_factor >= 1\n if zoom_in_factor == 1:\n return\n z_x0, z_y0 = zoom_coordinates_x0y0\n h_orig, w_orig = self.input_size_hw\n assert 0 <= z_x0 <= w_orig - 1\n assert 0 <= z_y0 <= h_orig - 1\n zoom_window_h, zoom_window_w = tuple(x / zoom_in_factor for x in self.input_size_hw)\n z_x1 = min(z_x0 + zoom_window_w, w_orig - 1)\n assert z_x1 <= w_orig - 1, f'{z_x1=} is larger than {w_orig-1=}'\n z_y1 = min(z_y0 + zoom_window_h, h_orig - 1)\n assert z_y1 <= h_orig - 1, f'{z_y1=} is larger than {h_orig-1=}'\n\n x0 = th.clamp(self.x, min=z_x0, max=z_x1 - 1)\n y0 = th.clamp(self.y, min=z_y0, max=z_y1 - 1)\n\n x1 = th.clamp(self.x + self.w, min=z_x0, max=z_x1 - 1)\n y1 = th.clamp(self.y + self.h, min=z_y0, max=z_y1 - 1)\n\n self.x = x0 - z_x0\n self.y = y0 - z_y0\n self.w = x1 - x0\n self.h = y1 - y0\n self.input_size_hw = (zoom_window_h, zoom_window_w)\n\n self.remove_flat_labels_()\n\n self.scale_(scaling_multiplier=zoom_in_factor)\n\n def zoom_out_and_rescale_(self, zoom_coordinates_x0y0: Tuple[int, int], zoom_out_factor: float):\n \"\"\"\n 1) Scales the input by a factor of 1/zoom_out_factor (i.e. 
reduces the canvas size)\n 2) Places the downscaled canvas into the original canvas at the top-left coordinates zoom_coordinates_x0y0\n \"\"\"\n if len(self) == 0:\n return\n assert len(zoom_coordinates_x0y0) == 2\n assert zoom_out_factor >= 1\n if zoom_out_factor == 1:\n return\n\n h_orig, w_orig = self.input_size_hw\n self.scale_(scaling_multiplier=1 / zoom_out_factor)\n\n self.input_size_hw = (h_orig, w_orig)\n z_x0, z_y0 = zoom_coordinates_x0y0\n assert 0 <= z_x0 <= w_orig - 1\n assert 0 <= z_y0 <= h_orig - 1\n\n self.x = self.x + z_x0\n self.y = self.y + z_y0\n\n def scale_(self, scaling_multiplier: float):\n if len(self) == 0:\n return\n assert scaling_multiplier > 0\n if scaling_multiplier == 1:\n return\n img_ht, img_wd = self.input_size_hw\n new_img_ht = scaling_multiplier * img_ht\n new_img_wd = scaling_multiplier * img_wd\n self.input_size_hw = (new_img_ht, new_img_wd)\n x1 = th.clamp((self.x + self.w) * scaling_multiplier, max=new_img_wd - 1)\n y1 = th.clamp((self.y + self.h) * scaling_multiplier, max=new_img_ht - 1)\n self.x = self.x * scaling_multiplier\n self.y = self.y * scaling_multiplier\n\n self.w = x1 - self.x\n self.h = y1 - self.y\n\n self.remove_flat_labels_()\n\n def flip_lr_(self) -> None:\n if len(self) == 0:\n return\n self.x = self.input_size_hw[1] - 1 - self.x - self.w\n\n def get_labels_as_tensors(self, keep_classes: list = [], format_: str = 'yolox') -> th.Tensor:\n self._assert_not_numpy()\n if format_ == 'yolox':\n out = th.zeros((len(self), 6), dtype=th.float32, device=self.device)\n if len(self) == 0:\n return out\n out[:, 0] = self.class_id\n out[:, 1] = self.x + 0.5 * self.w\n out[:, 2] = self.y + 0.5 * self.h\n out[:, 3] = self.w\n out[:, 4] = self.h\n out[:, 5] = th.tensor([int(i) in keep_classes for i in self.class_id]) + 0\n\n return out\n else:\n raise NotImplementedError\n\n @staticmethod\n def get_labels_as_batched_tensor(obj_label_list: List[ObjectLabels], training_classes: list = [], format_: str = 'yolox') -> th.Tensor:\n num_object_frames = len(obj_label_list)\n assert num_object_frames > 0\n max_num_labels_per_object_frame = max([len(x) for x in obj_label_list])\n assert max_num_labels_per_object_frame > 0\n if format_ == 'yolox':\n tensor_labels = []\n for labels in obj_label_list:\n obj_labels_tensor = labels.get_labels_as_tensors(keep_classes = training_classes, format_=format_)\n num_to_pad = max_num_labels_per_object_frame - len(labels)\n padded_labels = pad(obj_labels_tensor, (0, 0, 0, num_to_pad), mode='constant', value=0)\n tensor_labels.append(padded_labels)\n tensor_labels = th.stack(tensors=tensor_labels, dim=0)\n return tensor_labels\n else:\n raise NotImplementedError\n \n @staticmethod\n def labels_mapping(device, labels_: th.Tensor):\n mask = th.where(labels_[:, :, -1] == 1, th.tensor(1, device = device), th.tensor(0, device = device))\n label = labels_ * mask.unsqueeze(-1)\n label[:, :, [0, -1]] = label[:, :, [-1, 0]]\n label = label[:, :, 0:-1]\n label[:, :, 0] = 0\n return label"
},
{
"identifier": "DataType",
"path": "data/utils/types.py",
"snippet": "class DataType(Enum):\nclass DatasetType(Enum):\nclass DatasetMode(Enum):\nclass DatasetSamplingMode(StrEnum):\nclass ObjDetOutput(Enum):\n EV_REPR = auto()\n FLOW = auto()\n IMAGE = auto()\n OBJLABELS = auto()\n OBJLABELS_SEQ = auto()\n IS_PADDED_MASK = auto()\n IS_FIRST_SAMPLE = auto()\n TOKEN_MASK = auto()\n GEN1 = auto()\n GEN4 = auto()\n TRAIN = auto()\n VALIDATION = auto()\n TESTING = auto()\n RANDOM = 'random'\n STREAM = 'stream'\n MIXED = 'mixed'\n LABELS_PROPH = auto()\n PRED_PROPH = auto()\n EV_REPR = auto()\n SKIP_VIZ = auto()"
},
{
"identifier": "postprocess",
"path": "models/detection/yolox/utils/boxes.py",
"snippet": "def postprocess(prediction, conf_thre=0.7, nms_thre=0.45, mode= 'train'):\n #modified: this place use the class_conf for mask, which is need to be fixed.\n box_corner = prediction.new(prediction.shape)\n box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2\n box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2\n box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2\n box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2\n prediction[:, :, :4] = box_corner[:, :, :4]\n\n output = [None for _ in range(len(prediction))]\n for i, image_pred in enumerate(prediction):\n\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Get score and class with highest confidence\n class_pred = torch.zeros(image_pred.shape[0],1).to(image_pred.device)\n if mode == 'val':\n conf_mask = keep_top_k_scores(image_pred, 1500)\n else:\n conf_mask = (image_pred[:, 4] >= conf_thre).squeeze()\n # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n detections = torch.cat((image_pred[:, :5], class_pred.float()), 1)\n detections = detections[conf_mask]\n if not detections.size(0):\n continue\n\n if mode == 'val':\n nms_out_index = nms_with_fixed_output(detections)\n else:\n nms_out_index = torchvision.ops.nms(\n detections[:, :4],\n detections[:, 4],\n nms_thre,\n )\n\n detections = detections[nms_out_index]\n if output[i] is None:\n output[i] = detections\n else:\n output[i] = torch.cat((output[i], detections))\n\n return output"
},
{
"identifier": "YoloXDetector",
"path": "models/detection/yolox_extension/models/detector.py",
"snippet": "class YoloXDetector(th.nn.Module):\n def __init__(self,\n model_cfg: DictConfig):\n super().__init__()\n backbone_cfg = model_cfg.backbone\n fpn_cfg = model_cfg.fpn\n head_cfg = model_cfg.head\n\n self.backbone = build_recurrent_backbone(backbone_cfg)\n\n in_channels = self.backbone.get_stage_dims(fpn_cfg.in_stages)\n self.fpn = build_yolox_fpn(fpn_cfg, in_channels=in_channels)\n\n strides = self.backbone.get_strides(fpn_cfg.in_stages)\n self.yolox_head = build_yolox_head(head_cfg, in_channels=in_channels, strides=strides)\n\n def forward_backbone(self,\n x: th.Tensor,\n previous_states: Optional[LstmStates] = None,\n token_mask: Optional[th.Tensor] = None) -> \\\n Tuple[BackboneFeatures, LstmStates]:\n with CudaTimer(device=x.device, timer_name=\"Backbone\"):\n backbone_features, states = self.backbone(x, previous_states, token_mask)\n return backbone_features, states\n\n def forward_detect(self,\n backbone_features: BackboneFeatures,\n targets: Optional[th.Tensor] = None,\n prev_reg: th.Tensor = None) -> \\\n Tuple[th.Tensor, Union[Dict[str, th.Tensor], None]]:\n device = next(iter(backbone_features.values())).device\n with CudaTimer(device=device, timer_name=\"FPN\"):\n fpn_features = self.fpn(backbone_features)\n if self.training:\n assert targets is not None\n with CudaTimer(device=device, timer_name=\"HEAD + Loss\"):\n outputs, losses = self.yolox_head(fpn_features, targets, prev_reg)\n return outputs, losses\n with CudaTimer(device=device, timer_name=\"HEAD\"):\n outputs, losses = self.yolox_head(fpn_features)\n assert losses is None\n return outputs, losses\n\n def forward(self,\n x: th.Tensor,\n previous_states: Optional[LstmStates] = None,\n retrieve_detections: bool = True,\n targets: Optional[th.Tensor] = None) -> \\\n Tuple[Union[th.Tensor, None], Union[Dict[str, th.Tensor], None], LstmStates]:\n backbone_features, states = self.forward_backbone(x, previous_states)\n outputs, losses = None, None\n if not retrieve_detections:\n assert targets is None\n return outputs, losses, states\n outputs, losses = self.forward_detect(backbone_features=backbone_features, targets=targets)\n return outputs, losses, states"
},
{
"identifier": "PropheseeEvaluator",
"path": "utils/evaluation/prophesee/evaluator.py",
"snippet": "class PropheseeEvaluator:\n LABELS = 'lables'\n PREDICTIONS = 'predictions'\n\n def __init__(self, dataset: str, downsample_by_2: bool):\n super().__init__()\n assert dataset in {'gen1', 'gen4'}\n self.dataset = dataset\n self.downsample_by_2 = downsample_by_2\n\n self._buffer = None\n self._buffer_empty = True\n self._reset_buffer()\n self.ignored = True\n\n def _reset_buffer(self):\n self._buffer_empty = True\n self._buffer = {\n self.LABELS: list(),\n self.PREDICTIONS: list(),\n }\n\n def set_ignored_to_False(self):\n self.ignored = False\n\n def _add_to_buffer(self, key: str, value: List[np.ndarray]):\n assert isinstance(value, list)\n for entry in value:\n assert isinstance(entry, np.ndarray)\n self._buffer_empty = False\n assert self._buffer is not None\n self._buffer[key].extend(value)\n\n def _get_from_buffer(self, key: str) -> List[np.ndarray]:\n assert not self._buffer_empty\n assert self._buffer is not None\n return self._buffer[key]\n\n def add_predictions(self, predictions: List[np.ndarray]):\n self._add_to_buffer(self.PREDICTIONS, predictions)\n\n def add_labels(self, labels: List[np.ndarray]):\n self._add_to_buffer(self.LABELS, labels)\n\n def reset_buffer(self) -> None:\n # E.g. call in on_validation_epoch_start\n self._reset_buffer()\n\n def has_data(self):\n return not self._buffer_empty\n\n def evaluate_buffer(self, img_height: int, img_width: int) -> Optional[Dict[str, Any]]:\n # e.g call in on_validation_epoch_end\n if self._buffer_empty:\n warn(\"Attempt to use prophesee evaluation buffer, but it is empty\", UserWarning, stacklevel=2)\n return\n\n labels = self._get_from_buffer(self.LABELS)\n predictions = self._get_from_buffer(self.PREDICTIONS)\n assert len(labels) == len(predictions)\n metrics = evaluate_list(result_boxes_list=predictions,\n gt_boxes_list=labels,\n height=img_height,\n width=img_width,\n apply_bbox_filters=True,\n downsampled_by_2=self.downsample_by_2,\n camera=self.dataset,\n ignored=self.ignored)\n return metrics"
},
{
"identifier": "to_prophesee",
"path": "utils/evaluation/prophesee/io/box_loading.py",
"snippet": "def to_prophesee(loaded_label_list: LOADED_LABELS, yolox_pred_list: YOLOX_PRED_PROCESSED, keep_classes: List = []) -> \\\n Tuple[List[np.ndarray], List[np.ndarray]]:\n \n assert len(loaded_label_list) == len(yolox_pred_list)\n loaded_label_list_proph = []\n yolox_pred_list_proph = []\n\n for loaded_labels, yolox_preds in zip(loaded_label_list, yolox_pred_list):\n # TODO: use loaded_label_to_prophesee func here\n time = None\n # --- LOADED LABELS ---\n\n loaded_labels.numpy_()\n loaded_label_proph = np.zeros((len(loaded_labels),), dtype=BBOX_DTYPE)\n for name in BBOX_DTYPE.names:\n if name == 'ignored_split':\n label = np.asarray(loaded_labels.get('class_id'), dtype=BBOX_DTYPE['class_id'])\n loaded_label_proph[name] = np.isin(label, np.array(keep_classes)).astype(dtype=BBOX_DTYPE[name])\n loaded_label_proph[name] = np.where(loaded_label_proph[name] == 0, 1, 0)\n continue\n # if name == 'class_Id':\n # loaded_label_proph[name] = np.zeros_like(np.asarray(loaded_labels.get(name), dtype=BBOX_DTYPE[name]))\n loaded_label_proph[name] = np.asarray(loaded_labels.get(name), dtype=BBOX_DTYPE[name])\n # if name =='class_id':\n # loaded_label_proph[name] = np.asarray(np.zeros_like(loaded_labels.get(name)), dtype=BBOX_DTYPE[name])\n if name == 't':\n time = np.unique(loaded_labels.get(name))\n assert time.size == 1\n time = time.item()\n\n #modified: we assign the class in keep_classes to 0\n # loaded_label_proph = np.array([(item[0], item[1], item[2], item[3], item[4], 0, item[6], item[7]) \n # for item in loaded_label_proph if int(item[5]) in keep_classes],dtype=BBOX_DTYPE)\n \n loaded_label_list_proph.append(loaded_label_proph)\n\n # --- YOLOX PREDICTIONS ---\n # Assumes batch of post-processed predictions from YoloX Head.\n # See postprocessing: https://github.com/Megvii-BaseDetection/YOLOX/blob/a5bb5ab12a61b8a25a5c3c11ae6f06397eb9b296/yolox/utils/boxes.py#L32\n # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n num_pred = 0 if yolox_preds is None else yolox_preds.shape[0]\n yolox_pred_proph = np.zeros((num_pred,), dtype=BBOX_DTYPE)\n if num_pred > 0:\n if isinstance(loaded_labels, np.ndarray):\n yolox_preds = yolox_preds\n else:\n yolox_preds = yolox_preds.detach().cpu().numpy()\n assert yolox_preds.shape == (num_pred, 6)\n yolox_pred_proph['t'] = np.ones((num_pred,), dtype=BBOX_DTYPE['t']) * time\n yolox_pred_proph['x'] = np.asarray(yolox_preds[:, 0], dtype=BBOX_DTYPE['x'])\n yolox_pred_proph['y'] = np.asarray(yolox_preds[:, 1], dtype=BBOX_DTYPE['y'])\n yolox_pred_proph['w'] = np.asarray(yolox_preds[:, 2] - yolox_preds[:, 0], dtype=BBOX_DTYPE['w'])\n yolox_pred_proph['h'] = np.asarray(yolox_preds[:, 3] - yolox_preds[:, 1], dtype=BBOX_DTYPE['h'])\n yolox_pred_proph['class_id'] = np.asarray(yolox_preds[:, 5], dtype=BBOX_DTYPE['class_id'])\n yolox_pred_proph['class_confidence'] = np.asarray(yolox_preds[:, 4], dtype=BBOX_DTYPE['class_confidence'])\n yolox_pred_list_proph.append(yolox_pred_proph)\n\n return loaded_label_list_proph, yolox_pred_list_proph"
},
{
"identifier": "InputPadderFromShape",
"path": "utils/padding.py",
"snippet": "class InputPadderFromShape:\n def __init__(self, desired_hw: Tuple[int, int], mode: str = 'constant', value: int = 0, type: str = 'corner'):\n \"\"\"\n :param desired_hw: Desired height and width\n :param mode: See torch.nn.functional.pad\n :param value: See torch.nn.functional.pad\n :param type: \"corner\": add zero to bottom and right\n \"\"\"\n assert isinstance(desired_hw, tuple)\n assert len(desired_hw) == 2\n assert desired_hw[0] % 4 == 0, 'Required for token mask padding'\n assert desired_hw[1] % 4 == 0, 'Required for token mask padding'\n assert type in {'corner'}\n\n self.desired_hw = desired_hw\n self.mode = mode\n self.value = value\n self.type = type\n self._pad_ev_repr = None\n self._pad_token_mask = None\n\n @staticmethod\n def _pad_tensor_impl(input_tensor: th.Tensor, desired_hw: Tuple[int, int], mode: str, value: Any) \\\n -> Tuple[th.Tensor, List[int]]:\n assert isinstance(input_tensor, th.Tensor)\n\n ht, wd = input_tensor.shape[-2:]\n ht_des, wd_des = desired_hw\n assert ht <= ht_des\n assert wd <= wd_des\n\n pad_left = 0\n pad_right = wd_des - wd\n pad_top = 0\n pad_bottom = ht_des - ht\n\n pad = [pad_left, pad_right, pad_top, pad_bottom]\n return F.pad(input_tensor, pad=pad, mode=mode, value=value if mode == 'constant' else None), pad\n\n def pad_tensor_ev_repr(self, ev_repr: th.Tensor) -> th.Tensor:\n padded_ev_repr, pad = self._pad_tensor_impl(input_tensor=ev_repr, desired_hw=self.desired_hw,\n mode=self.mode, value=self.value)\n if self._pad_ev_repr is None:\n self._pad_ev_repr = pad\n else:\n assert self._pad_ev_repr == pad\n return padded_ev_repr\n\n def pad_token_mask(self, token_mask: th.Tensor):\n assert isinstance(token_mask, th.Tensor)\n\n desired_hw = tuple(x // 4 for x in self.desired_hw)\n padded_token_mask, pad = self._pad_tensor_impl(input_tensor=token_mask, desired_hw=desired_hw,\n mode='constant', value=0)\n if self._pad_token_mask is None:\n self._pad_token_mask = pad\n else:\n assert self._pad_token_mask == pad\n return padded_token_mask"
},
{
"identifier": "BackboneFeatureSelector",
"path": "modules/utils/detection.py",
"snippet": "class Mode(Enum):\nclass BackboneFeatureSelector:\nclass EventReprSelector:\nclass REGStates:\nclass RNNStates:\n TRAIN = auto()\n VAL = auto()\n TEST = auto()\n def __init__(self):\n def reset(self):\n def add_backbone_features(self,\n backbone_features: BackboneFeatures,\n selected_indices: Optional[List[int]] = None) -> None:\n def get_batched_backbone_features(self) -> Optional[BackboneFeatures]:\n def __init__(self):\n def reset(self):\n def __len__(self):\n def add_event_representations(\n self, event_representations: th.Tensor, selected_indices: Optional[List[int]] = None) -> None:\n def get_event_representations_as_list(\n self, start_idx: int = 0, end_idx: Optional[int] = None) -> Optional[List[th.Tensor]]:\n def __init__(self):\n def _has_states(self):\n def recursive_detach(cls, inp: th.Tensor):\n def recursive_reset(cls,\n inp:th.Tensor,\n indices_or_bool_tensor: Optional[Union[List[int], torch.Tensor]] = None):\n def save_states_and_detach(self, worker_id: int, prev_reg: th.tensor) -> None:\n def get_states(self, worker_id: int):\n def reset(self, worker_id: int, indices_or_bool_tensor: Optional[Union[List[int], torch.Tensor]] = None):\n def __init__(self):\n def _has_states(self):\n def recursive_detach(cls, inp: Union[th.Tensor, List, Tuple, Dict]):\n def recursive_reset(cls,\n inp: Union[th.Tensor, List, Tuple, Dict],\n indices_or_bool_tensor: Optional[Union[List[int], torch.Tensor]] = None):\n def save_states_and_detach(self, worker_id: int, states: LstmStates) -> None:\n def get_states(self, worker_id: int) -> Optional[LstmStates]:\n def reset(self, worker_id: int, indices_or_bool_tensor: Optional[Union[List[int], torch.Tensor]] = None):\ndef mixed_collate_fn(x1: Union[th.Tensor, List[th.Tensor]], x2: Union[th.Tensor, List[th.Tensor]]):\ndef merge_mixed_batches(batch: Dict[str, Any]):"
}
] | from typing import Any, Optional, Tuple, Union, Dict
from warnings import warn
from omegaconf import DictConfig
from pytorch_lightning.utilities.types import STEP_OUTPUT
from data.genx_utils.labels import ObjectLabels
from data.utils.types import DataType, LstmStates, ObjDetOutput, DatasetSamplingMode
from models.detection.yolox.utils.boxes import postprocess
from models.detection.yolox_extension.models.detector import YoloXDetector
from utils.evaluation.prophesee.evaluator import PropheseeEvaluator
from utils.evaluation.prophesee.io.box_loading import to_prophesee
from utils.padding import InputPadderFromShape
from .utils.detection import BackboneFeatureSelector, EventReprSelector, RNNStates, REGStates, Mode, mode_2_string, \
merge_mixed_batches
import numpy as np
import pytorch_lightning as pl
import torch
import torch as th
import torch.distributed as dist
import os
import cv2
import ipdb | 8,815 | retrieve_detections: bool = True,
targets=None) \
-> Tuple[Union[th.Tensor, None], Union[Dict[str, th.Tensor], None], LstmStates]:
return self.mdl(x=event_tensor,
previous_states=previous_states,
retrieve_detections=retrieve_detections,
targets=targets)
def get_worker_id_from_batch(self, batch: Any) -> int:
return batch['worker_id']
def get_data_from_batch(self, batch: Any):
return batch['data']
def vis_and_save_image(self, ev_pr, label, pred, unseen_classes,
save_dir = '/home/zht/python_project/RVT_CAOD_v9/save_img/', threshold = 0.3, topn = 10):
files = os.listdir(save_dir)
index = len(files)
ev_pr = ev_pr.to('cpu')
assert ev_pr.shape[0] % 2 == 0
num_bins = int(ev_pr.shape[0] / 2)
height = int(ev_pr.shape[1])
width = int(ev_pr.shape[2])
ev_pr = ev_pr.permute(1, 2, 0)
ev_pr = ev_pr.numpy()
frame = np.zeros((height, width, 3), dtype=np.uint8)
for i in range(num_bins):
pos_image = (ev_pr[:, :, i + num_bins]).astype(np.uint8)
neg_image = (ev_pr[:, :, i]).astype(np.uint8)
pos_image = cv2.equalizeHist(pos_image)
neg_image = cv2.equalizeHist(neg_image)
image = np.concatenate((neg_image[..., None], np.zeros((height, width, 1), dtype=np.uint8), pos_image[..., None]), axis=-1)
frame = np.add(frame, image)
frame = frame * 255.0
frame_copy = frame.copy()
# topn = label.shape[0]
fix_num_threshold = np.partition(pred['class_confidence'], -topn)[-topn]
if fix_num_threshold > threshold:
pass
else:
threshold = fix_num_threshold
mask = pred['class_confidence'] > threshold
pred = pred[mask]
for item in pred:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
cv2.rectangle(frame, (left, top), (right, bottom), (255, 250, 250), 1)
for item in label:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
class_id = item['class_id']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
center = ((left + right) // 2, (top + bottom) // 2)
if class_id in unseen_classes:
color = (255, 165, 0)
cv2.putText(frame_copy, str(class_id), (center[0], bottom - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
else:
color = (0, 255, 0)
cv2.rectangle(frame_copy, (left, top), (right, bottom), color, 1)
stacked_image = cv2.hconcat([frame, frame_copy])
save_path = save_dir + '{}.png'.format(index)
cv2.imwrite(save_path, stacked_image)
def concatenate_tensors(self, tensor1, tensor2, order1, order2):
D1 = tensor1.shape[0]
D2 = tensor2.shape[0]
D = D1 + D2
result_shape = (D,) + tensor1.shape[1:]
result = torch.zeros(result_shape, dtype=tensor1.dtype).to(tensor1.device)
for i, idx in enumerate(order1):
result[idx] = tensor1[i]
for i, idx in enumerate(order2):
result[idx] = tensor2[i]
return result
def subtract_lists(self, listA: list, listB: list) -> list:
return [x for x in listA if x not in listB]
def merge_dicts_and_average(self, dicts_list: list):
result_dict = {}
num_dicts = len(dicts_list)
for d in dicts_list:
for key, value in d.items():
if key in result_dict:
result_dict[key] += value
else:
result_dict[key] = value
for key in result_dict:
result_dict[key] /= num_dicts
return result_dict
def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
batch = merge_mixed_batches(batch)
data = self.get_data_from_batch(batch)
worker_id = self.get_worker_id_from_batch(batch)
mode = Mode.TRAIN
self.started_training = True
step = self.trainer.global_step
|
def remove_elements(ori_items, moving_items):
return [elem for elem in ori_items if elem not in moving_items]
class Module(pl.LightningModule):
def __init__(self, full_config: DictConfig):
super().__init__()
self.full_config = full_config
self.mdl_config = full_config.model
in_res_hw = tuple(self.mdl_config.backbone.in_res_hw)
self.input_padder = InputPadderFromShape(desired_hw=in_res_hw)
self.mdl = YoloXDetector(self.mdl_config)
self.mode_2_rnn_states: Dict[Mode, RNNStates] = {
Mode.TRAIN: RNNStates(),
Mode.VAL: RNNStates(),
Mode.TEST: RNNStates(),
}
self.reg_states = REGStates()
def setup(self, stage: Optional[str] = None) -> None:
dataset_name = self.full_config.dataset.name
self.mode_2_hw: Dict[Mode, Optional[Tuple[int, int]]] = {}
self.mode_2_batch_size: Dict[Mode, Optional[int]] = {}
self.mode_2_psee_evaluator: Dict[Mode, Optional[PropheseeEvaluator]] = {}
self.mode_2_sampling_mode: Dict[Mode, DatasetSamplingMode] = {}
self.started_training = True
dataset_train_sampling = self.full_config.dataset.train.sampling
dataset_eval_sampling = self.full_config.dataset.eval.sampling
assert dataset_train_sampling in iter(DatasetSamplingMode)
assert dataset_eval_sampling in (DatasetSamplingMode.STREAM, DatasetSamplingMode.RANDOM)
if stage == 'fit': # train + val
self.training_classes = self.full_config.dataset.training_classes
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
self.train_config = self.full_config.training
self.train_metrics_config = self.full_config.logging.train.metrics
if self.train_metrics_config.compute:
self.mode_2_psee_evaluator[Mode.TRAIN] = PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
# We set up two evaluators: one (0) for unseen classes and one (1) for all classes
self.mode_2_psee_evaluator[Mode.VAL] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.TRAIN] = dataset_train_sampling
self.mode_2_sampling_mode[Mode.VAL] = dataset_eval_sampling
for mode in (Mode.TRAIN, Mode.VAL):
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
self.started_training = False
elif stage == 'validate':
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
mode = Mode.VAL
self.mode_2_psee_evaluator[mode] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.VAL] = dataset_eval_sampling
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
elif stage == 'test':
mode = Mode.TEST
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
self.mode_2_psee_evaluator[Mode.TEST] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.TEST] = dataset_eval_sampling
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
else:
raise NotImplementedError
def forward(self,
event_tensor: th.Tensor,
previous_states: Optional[LstmStates] = None,
retrieve_detections: bool = True,
targets=None) \
-> Tuple[Union[th.Tensor, None], Union[Dict[str, th.Tensor], None], LstmStates]:
return self.mdl(x=event_tensor,
previous_states=previous_states,
retrieve_detections=retrieve_detections,
targets=targets)
def get_worker_id_from_batch(self, batch: Any) -> int:
return batch['worker_id']
def get_data_from_batch(self, batch: Any):
return batch['data']
def vis_and_save_image(self, ev_pr, label, pred, unseen_classes,
save_dir = '/home/zht/python_project/RVT_CAOD_v9/save_img/', threshold = 0.3, topn = 10):
files = os.listdir(save_dir)
index = len(files)
ev_pr = ev_pr.to('cpu')
assert ev_pr.shape[0] % 2 == 0
num_bins = int(ev_pr.shape[0] / 2)
height = int(ev_pr.shape[1])
width = int(ev_pr.shape[2])
ev_pr = ev_pr.permute(1, 2, 0)
ev_pr = ev_pr.numpy()
frame = np.zeros((height, width, 3), dtype=np.uint8)
for i in range(num_bins):
pos_image = (ev_pr[:, :, i + num_bins]).astype(np.uint8)
neg_image = (ev_pr[:, :, i]).astype(np.uint8)
pos_image = cv2.equalizeHist(pos_image)
neg_image = cv2.equalizeHist(neg_image)
image = np.concatenate((neg_image[..., None], np.zeros((height, width, 1), dtype=np.uint8), pos_image[..., None]), axis=-1)
frame = np.add(frame, image)
frame = frame * 255.0
frame_copy = frame.copy()
# topn = label.shape[0]
fix_num_threshold = np.partition(pred['class_confidence'], -topn)[-topn]
if fix_num_threshold > threshold:
pass
else:
threshold = fix_num_threshold
mask = pred['class_confidence'] > threshold
pred = pred[mask]
for item in pred:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
cv2.rectangle(frame, (left, top), (right, bottom), (255, 250, 250), 1)
for item in label:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
class_id = item['class_id']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
center = ((left + right) // 2, (top + bottom) // 2)
if class_id in unseen_classes:
color = (255, 165, 0)
cv2.putText(frame_copy, str(class_id), (center[0], bottom - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
else:
color = (0, 255, 0)
cv2.rectangle(frame_copy, (left, top), (right, bottom), color, 1)
stacked_image = cv2.hconcat([frame, frame_copy])
save_path = save_dir + '{}.png'.format(index)
cv2.imwrite(save_path, stacked_image)
def concatenate_tensors(self, tensor1, tensor2, order1, order2):
D1 = tensor1.shape[0]
D2 = tensor2.shape[0]
D = D1 + D2
result_shape = (D,) + tensor1.shape[1:]
result = torch.zeros(result_shape, dtype=tensor1.dtype).to(tensor1.device)
for i, idx in enumerate(order1):
result[idx] = tensor1[i]
for i, idx in enumerate(order2):
result[idx] = tensor2[i]
return result
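# Usage illustration (hypothetical values, not in the original file): scatter two
# batches back into their original positions.
#   t1 = torch.tensor([[1.], [2.]])   # rows destined for positions 0 and 2
#   t2 = torch.tensor([[9.]])         # row destined for position 1
#   self.concatenate_tensors(t1, t2, order1=[0, 2], order2=[1])
#   # -> tensor([[1.], [9.], [2.]])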
def subtract_lists(self, listA: list, listB: list) -> list:
return [x for x in listA if x not in listB]
def merge_dicts_and_average(self, dicts_list: list):
result_dict = {}
num_dicts = len(dicts_list)
for d in dicts_list:
for key, value in d.items():
if key in result_dict:
result_dict[key] += value
else:
result_dict[key] = value
for key in result_dict:
result_dict[key] /= num_dicts
return result_dict
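# Usage illustration (hypothetical values, not in the original file): element-wise
# mean over a list of metric dictionaries.
#   self.merge_dicts_and_average([{"loss": 2.0, "iou": 0.4}, {"loss": 4.0, "iou": 0.6}])
#   # -> {"loss": 3.0, "iou": 0.5}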
def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
batch = merge_mixed_batches(batch)
data = self.get_data_from_batch(batch)
worker_id = self.get_worker_id_from_batch(batch)
mode = Mode.TRAIN
self.started_training = True
step = self.trainer.global_step | ev_tensor_sequence = data[DataType.EV_REPR] | 1 | 2023-12-29 04:04:34+00:00 | 12k |
Wangyuhao06/2022-adhoc | main.py | [
{
"identifier": "Environment",
"path": "src/env.py",
"snippet": "class Environment():\n #初始化环境\n def __init__(self):\n #初始数据-最大节点数\n self.node_max=NODE_MAX\n self.node_space_size=NODE_MAX\n self.node_moving_area=MOV_AREA\n #初始化二维平面\n self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0)\n self.position=0\n #初始化随机相邻矩阵\n self.topology = np.zeros((self.node_space_size,self.node_space_size))\n self.topology[0:self.node_max,0:self.node_max] = np.random.randint(0,2,(self.node_max,self.node_max))\n for i in range(self.node_max):\n self.topology[i,i] = 1\n for j in range(self.node_max):\n #构建双向图\n if self.topology[i,j] == 1:\n self.topology[j,i] = 1\n #初始化节点动作空间\n self.topology_actSpace=[]\n #初始化频谱块元组-----(0,[])表示(占用与否,[占用transtaskID列表]) \n self.freqB_list=([],[],[],[],[],[],[],[],[],[]) #((0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]))\n self.freqB_use_history=([],[],[],[],[],[],[],[],[],[])\n #初始化传输事件列表\n self.trans_task_ID_inTR=[]\n self.trans_task_list=[]\n self.trans_task_cnt=0 # id计数器\n #初始化包列表\n self.amount_poisson_list = np.random.poisson(lam=LAMDA,size=MAX_TIME)#包数量初始化\n self.size_normal_list = ((np.random.normal(0,1,MAX_TIME*2)*16+16)//8)*8#包大小初始化\n self.pack_use_cnt=0#包序号计数器\n self.packets_list=[]#包列表\n self.packets_live_id=[]\n #初始化节点列表\n self.node_list=[]\n self.live_node_ID_list=[]\n for i in range(self.node_max):\n locals()['node_'+str(i)] = Node(i)\n self.node_list.append(locals()['node_'+str(i)])\n self.live_node_ID_list.append(i)\n #噪声系数\n self.noise_list = np.random.rayleigh(1,MAX_TIME*2)#*NOISE_CONST/2\n #统计参数\n self.envTr_time=0\n self.allNode_pw=0\n self.allNode_delay=0\n self.time_avg=0\n self.arrive_time=1\n self.end=0\n self.terminate=0\n \n self.packet_arrive_success=[]\n self.agent_arrive=[]\n for i in range(NODE_MAX):\n self.packet_arrive_success.append(0)#节点作为 |源节点| 发的包成功到达数\n self.agent_arrive.append(0)#节点作为 |最后一个中间节点| 发的包成功到达数\n # self.sum_packet_done_rate=0\n #四元组\n self.all_ob=np.array([[0]*OBS_LEN]*NODE_MAX)\n self.reward=np.array([1]*self.node_max)\n self.para_reward=np.array([1]*self.node_max) \n \n def generate_packet(self,cur_time):\n packetsList_temp=[]\n packets_cnt=self.amount_poisson_list[cur_time]\n for i in range(packets_cnt):\n nodes_temp = random.sample(self.live_node_ID_list,2)\n locals()['packet_'+str(self.pack_use_cnt)]=Packet(self.pack_use_cnt,abs(self.size_normal_list[self.pack_use_cnt])+8,nodes_temp[0],nodes_temp[1],cur_time)\n self.packets_list.append(locals()['packet_'+str(self.pack_use_cnt)])\n self.packets_live_id.append(self.pack_use_cnt)\n packetsList_temp.append(locals()['packet_'+str(self.pack_use_cnt)])\n self.node_list[nodes_temp[0]].packets_ToSend_id.append(self.pack_use_cnt)\n self.node_list[nodes_temp[0]].packets_id_list.append(self.pack_use_cnt)\n self.pack_use_cnt+=1\n return packetsList_temp\n \n #传输任务更新\n def trans_task_update(self,cur_time):\n \n if len(self.trans_task_ID_inTR)>0 and len(self.trans_task_list)>0:\n #所有在传传输任务\n for trans_temp_id in self.trans_task_ID_inTR:\n task_finish=self.trans_task_list[trans_temp_id].Trans_task_update()\n node_send_id,node_rec_id,packet_id=self.trans_task_list[trans_temp_id].show_info()\n #包传输更新\n self.packets_list[packet_id].time_use+=1\n #节点更新\n # self.node_list[node_send_id].next_hop_id=node_rec_id\n if node_send_id!=node_rec_id: \n self.node_list[node_send_id].power_list.append(self.trans_task_list[trans_temp_id].power_consume[0])\n self.node_list[node_send_id].current_power_send=self.trans_task_list[trans_temp_id].power_consume[0]\n 
self.node_list[node_send_id].energy_consumption+=self.trans_task_list[trans_temp_id].power_consume[0]\n\n self.node_list[node_rec_id].power_list.append(self.trans_task_list[trans_temp_id].power_consume[1])\n self.node_list[node_rec_id].current_power_receive=self.trans_task_list[trans_temp_id].power_consume[1]\n self.node_list[node_rec_id].energy_consumption+=self.trans_task_list[trans_temp_id].power_consume[1]\n #统计参数更新\n self.envTr_time+=1\n \n #trans任务完成更新\n if task_finish and self.topology[node_send_id,node_rec_id]==1 :\n #更新包与节点\n # T-T清除\n self.trans_task_ID_inTR.remove(trans_temp_id)\n # 包属性清除\n self.packets_list[packet_id].in_TR=0\n self.packets_list[packet_id].cur_trans_task_id=0\n self.packets_list[packet_id].cur_node_id=node_rec_id\n # 发送节点属性清除\n self.node_list[node_send_id].packets_ToSend_id.remove(packet_id)\n self.node_list[node_send_id].trans_task_send.get()\n self.node_list[node_send_id].sending_flag=0\n self.node_list[node_send_id].current_amp_send=0\n self.node_list[node_send_id].current_power_send=0\n # 接收节点属性清除\n self.node_list[node_rec_id].trans_taskID_rec.remove(trans_temp_id)\n if len(self.node_list[node_rec_id].trans_taskID_rec)==0:\n self.node_list[node_rec_id].rec_flag=0\n # self.node_list[node_rec_id].current_amp_receive=0\n self.node_list[node_rec_id].current_power_receive=0\n # 频谱环境更新(频谱块release)\n freqB_ID_now=0\n for freqB_ocp_now in self.trans_task_list[trans_temp_id].FreqB_occup:\n if freqB_ocp_now and node_send_id!=node_rec_id:\n self.freqB_list[freqB_ID_now].remove(node_send_id)\n freqB_ID_now+=1\n\n #判断是否到达目的地 \n if self.packets_list[packet_id].cur_node_id==self.packets_list[packet_id].dst_node_id and self.topology[node_send_id,node_rec_id]==1:\n # 可通信到达\n self.packets_list[packet_id].arrive_flag=1\n self.packets_live_id.remove(packet_id)\n ### 记录接受节点和发出节点的奖励 ###\n self.packet_arrive_success[self.packets_list[packet_id].ori_node_id]+=1\n self.agent_arrive[node_send_id]+=1 \n # self.arrive_time += self.trans_task_list[trans_temp_id].time_use # datacheck3\n self.arrive_success += 1\n elif self.topology[node_send_id,node_rec_id]==1 :\n #可通信没到达\n self.node_list[node_rec_id].packets_ToSend_id.append(packet_id)\n # self.arrive_time += (cur_time - self.packets_list[packet_id].time_start) # datacheck3\n else:\n #不可通信\n self.trans_task_list[trans_temp_id].time_cnt=0\n self.trans_task_list[trans_temp_id].finish_flag=0\n # for packet_id in self.packets_live_id:\n # #判断是否到达目的地 \n # if self.packets_list[packet_id].cur_node_id==self.packets_list[packet_id].dst_node_id or self.packets_list[packet_id].arrive_flag==1:\n # #到达\n # continue\n # # self.arrive_time += self.trans_task_list[trans_temp_id].time_use\n # else:#没到达\n # self.arrive_time += 1\n self.arrive_time += len(self.packets_live_id)\n \n \n \n def all_agent_observe(self): \n all_ob=[]\n # fBlst=[0,0,0,0,0,0,0,0,0,0]\n degree=0\n pack_storage=0\n pw_avg_all=0\n dst_node=-1\n\n # for node_id in range(self.node_max):\n # if len (self.node_list[node_id].packets_ToSend_id):\n # packet_toSend_id=self.node_list[node_id].packets_ToSend_id[0]\n # dst_node=self.packets_list[packet_toSend_id].dst_node_id\n \n # else:\n # dst_node=-1\n \n # for node_id in self.live_node_ID_list:\n # for node_id in range(self.node_max):\n # fb_tp_id=0\n # for fb_tp in self.node_list[node_id].current_freqB:\n # fBlst[fb_tp_id]=fb_tp\n # fb_tp_id+=1\n \n # for node_id in self.live_node_ID_list:\n #neibor_idlist=self.node_list[node_id].neibor_idlist[:]#深复制\n #receive ob?\n #neibor_idlist.append(node_id)\n #neibor_vector=[]\n #for i in 
neibor_idlist:\n \n # for node_id in range(self.node_max):\n # pwl=self.node_list[node_id].power_list\n # if len(pwl)>=BACKTIME:\n # pwlst=pwl[len(pwl)-BACKTIME:len(pwl)]\n # else:\n # pwlst=pwl \n # if len(pwlst)>0:\n # pw_avg=sum(pwlst)/len(pwlst)\n # else:\n # pw_avg=0\n # pw_avg_all+=pw_avg\n \n for node_id in range(self.node_max):\n pwl=self.node_list[node_id].power_list\n if len(pwl)>=BACKTIME:\n pwlst=pwl[len(pwl)-BACKTIME:len(pwl)]\n else:\n pwlst=pwl \n if len(pwlst)>0:\n pw_avg=sum(pwlst)/len(pwlst)\n else:\n pw_avg=0\n \n if len (self.node_list[node_id].packets_ToSend_id)>0:\n packet_toSend_id=self.node_list[node_id].packets_ToSend_id[0]\n dst_node=self.packets_list[packet_toSend_id].dst_node_id \n else:\n dst_node=-1\n \n pw=[]\n pw.append(pw_avg)\n \n dgr=[]\n degree=len(self.topology_actSpace[node_id][0])-1\n dgr.append(degree)\n \n pcs=[]\n pack_storage=len(self.node_list[node_id].packets_ToSend_id)\n pcs.append(pack_storage)\n \n dn=[]\n dn.append(dst_node)\n \n all_ob.append(pw+dgr+pcs+dn)\n #self.node_list[node_id].ob_send=neibor_vector\n \n return np.array(all_ob)\n \n \n # def generate_trans_task(self,trans_id,send_node,rec_node,packet):\n # trans_task_temp=Trans_task(trans_id,send_node,rec_node,packet)\n # return trans_task_temp\n \n def env_check_right(self):\n for node_id in self.live_node_ID_list:\n if self.node_list[node_id].trans_task_send.empty():\n assert self.node_list[node_id].sending_flag == 0\n elif not self.node_list[node_id].trans_task_send.empty():\n assert self.node_list[node_id].sending_flag == 1\n st_temp=self.node_list[node_id].trans_task_send.get()\n self.node_list[node_id].trans_task_send.put(st_temp)#无损使用队列内容\n s_node_send_id,s_node_rec_id,s_packet_id=st_temp.show_info()\n assert node_id==s_node_send_id\n # assert self.node_list[node_id].next_hop_id==s_node_rec_id\n assert self.node_list[node_id].packets_ToSend_id[0]==s_packet_id\n \n elif self.node_list[node_id].trans_task_rec.empty():\n assert self.rec_flag == 0\n elif not self.node_list[node_id].trans_task_rec.empty():\n assert self.node_list[node_id].rec_flag == 1\n rt_temp=self.node_list[node_id].trans_task_rec.get()\n self.node_list[node_id].trans_task_rec.put(rt_temp)#无损使用队列内容\n r_node_send_id,r_node_rec_id,r_packet_id=rt_temp.show_info()\n assert node_id==r_node_rec_id\n # assert self.node_list[node_id].next_hop_id==s_node_rec_id\n assert self.node_list[node_id].packets_ToSend_id[0] != r_packet_id\n \n return 0 \n \n\n def topology_update(self,cur_time,rand_change):\n self.topology = np.zeros((NODE_MAX,NODE_MAX))\n ################--------随机更改拓扑结构--------################\n if rand_change:\n positions=next(self.geo_area)\n self.position = positions\n for a in range(NODE_MAX):\n for b in range(NODE_MAX):\n if np.linalg.norm(positions[a]-positions[b]) <= COM_RANGE:\n self.topology[a,b]=1\n self.topology[b,a]=1\n else:\n self.topology[a,b]=0\n self.topology[b,a]=0\n # if np.random.rand()<DELTA and cur_time%30==0:\n # for i in np.random.randint(0,self.node_max,np.random.randint(3)+1):\n # self.topology[i,:]=np.random.randint(0,2,self.node_max)\n # self.topology[i,i] = 1\n # for j in range(self.node_max):\n # #构建双向图\n # if self.topology[i,j] == 1:\n # self.topology[j,i] = 1\n # print(positions)\n # print(\"****************\")\n # print(self.topology)\n # print(\"------------------------------------\")\n ################--------更新邻域--------################\n self.live_node_ID_list=[]\n self.topology_actSpace=[]\n for i in range(self.topology.shape[0]):\n if any(self.topology[i,:]):\n TPtemp = 
np.nonzero(self.topology[i,:])\n # self.node_list[i].neibor_idlist=TPtemp\n self.topology_actSpace.append(TPtemp)\n self.live_node_ID_list.append(i)\n else:\n TPtemp = -1\n self.topology_actSpace.append(TPtemp)\n return self.topology\n \n \n def get_state_reward(self):\n \n return self.topology,self.all_ob,self.reward \n \n def time_step(self,cur_time,action):\n \n self.packet_arrive_success=[]\n self.agent_arrive=[]\n for i in range(NODE_MAX):\n self.packet_arrive_success.append(0)\n self.agent_arrive.append(0)\n self.arrive_success=0\n \n # self.env_check_right()\n topology_now=self.topology_update(cur_time,1)\n self.generate_packet(cur_time)\n self.all_ob=self.all_agent_observe()\n self.trans_task_update(cur_time)\n for node_index in self.live_node_ID_list :\n if len(self.node_list[node_index].packets_ToSend_id)>0 and self.node_list[node_index].sending_flag!=1:\n packet_toSend_id=self.node_list[node_index].packets_ToSend_id[0]\n #包未到达且非在传----->生成trans_task\n if self.packets_list[packet_toSend_id].arrive_flag==0 and self.packets_list[packet_toSend_id].in_TR==0:\n #传输和接收节点决策\n send_node=self.node_list[node_index]\n Action=action[node_index]#######################################################\n next_hop_id,current_freqB,current_amp_send=Action[0],Action[1:N_ACTION_C],Action[N_ACTION_C]\n send_node.next_hop_id=next_hop_id \n rec_node=self.node_list[next_hop_id]\n current_amp_rec=RECAMP\n \n self.node_list[node_index].current_freqB=current_freqB\n self.node_list[node_index].next_hop_id=next_hop_id\n self.node_list[node_index].current_amp_send=current_amp_send\n #频谱环境更新\n freqB_ID_now=0\n for fB_ocp in current_freqB:\n if node_index!=next_hop_id and fB_ocp:\n self.freqB_list[freqB_ID_now].append(node_index)\n self.freqB_use_history[freqB_ID_now].append(node_index)\n freqB_ID_now+=1\n #T-T生成与T-T环境更新\n trans_task_now=Trans_task(self.trans_task_cnt,send_node,rec_node,self.packets_list[packet_toSend_id])\n trans_task_now.SNR_C=self.SNR_cac_update(cur_time,trans_task_now,current_amp_send,current_freqB,current_amp_rec)\n trans_task_now.time_use=int(trans_task_now.packsize/(trans_task_now.SNR_C[1]))+1\n \n if node_index==next_hop_id:\n trans_task_now.time_use=1#节点内部等待\n \n #节点与包写入\n #发送节点任务、标志更新\n self.node_list[node_index].trans_task_send.put_nowait(trans_task_now)\n self.node_list[node_index].sending_flag=1\n #接收节点任务、标志更新\n self.node_list[next_hop_id].trans_taskID_rec.append(trans_task_now.id)\n self.node_list[next_hop_id].rec_flag=1\n #包任务、标志更新\n self.packets_list[packet_toSend_id].cur_trans_task_id=self.trans_task_cnt\n self.packets_list[packet_toSend_id].in_TR=1\n #T-T环境写入\n self.trans_task_ID_inTR.append(trans_task_now.id)\n self.trans_task_list.append(trans_task_now)\n self.trans_task_cnt+=1\n #reward清算\n #总传输时间为self.envTr_time,总时间为cur_time\n packet_done_rate=1-round((len(self.packets_live_id)+0.1)/(len(self.packets_list)+0.1),4)#包传输完成率为packet_done_rate\n # self.avg_packet_done_rate += packet_done_rate\n # self.time_avg+=self.envTr_time/(1+len(self.packets_list)-len(self.packets_live_id))\n # self.time_avg+=packet_done_rate\n # self.time_avg+=self.arrive_time/((1+len(self.packets_list)-len(self.packets_live_id))*(packet_done_rate))\n \n # print(\"pdr: \"+str(packet_done_rate))\n if packet_done_rate<=0.03:\n packet_done_rate=0.03\n \n self.time_avg+=self.arrive_time/(1+len(self.packets_list)-len(self.packets_live_id))#*(packet_done_rate)\n \n # if len(self.packets_live_id) == 0:\n # self.terminate=1\n if len(self.packets_live_id) == 0:\n self.end=1\n \n for i in range(self.node_max):\n # 
pw_sum=sum(self.node_list[i].power_list)\n if len(self.node_list[i].power_list)>0:\n pw_now=self.node_list[i].power_list[-1]+1\n else:\n pw_now=1\n if not self.node_list[i].trans_task_send.empty():\n st_temp=self.node_list[i].trans_task_send.get()\n self.node_list[i].trans_task_send.put_nowait(st_temp)#无损使用队列内容\n trans_delay=st_temp.time_use+1\n else:\n trans_delay=1\n \n self.para_reward[i]= -trans_delay*pw_now\n # if packet_done_rate<=0.05:\n # packet_done_rate=0.05\n # self.reward[i]=round((packet_done_rate*cur_time*cur_time*self.node_max*DEVICE_ENERGY+0.0001)/(pw_sum*self.envTr_time+0.0001),6)\n # self.reward[i]=round((packet_done_rate*cur_time*self.node_max*DEVICE_ENERGY+1000)/(pw_sum+1000),6)\n # self.reward[i]=round(-(pw_sum*self.envTr_time)/((packet_done_rate+0.1)*(cur_time+1)*(cur_time+1)*self.node_max),6)#RWD2\n # self.reward[i]=round(-(10*pw_sum*self.envTr_time)/((packet_done_rate)*(cur_time+1)*(cur_time+1)*self.node_max),6)#RWD3\n # self.reward[i]=round(-(pw_sum*self.envTr_time)/((packet_done_rate+0.001)*(cur_time+1)*(cur_time+1)*self.node_max),6)\n # self.reward[i]=round(-(10*pw_sum*self.envTr_time)/((packet_done_rate)*(cur_time+1)*(cur_time+1)*self.node_max),6)#RWD3\n #self.reward[i]=round(-(10*log10(10+pw_sum)*self.arrive_time)/((1+len(self.packets_list)-len(self.packets_live_id))*(packet_done_rate)*(cur_time+1)*self.node_max),6)#RWD4\n # self.reward[i]=round(-(10*pw_sum*self.arrive_time)/((1+len(self.packets_list)-len(self.packets_live_id))*(packet_done_rate)*(cur_time+1)*self.node_max),6)#RWD5\n # self.reward[i]=-(10*log10(10+pw_sum)*(self.arrive_time+1))/((1+len(self.packets_list))*(packet_done_rate)*(cur_time+1)*self.node_max)#RWD6\n # self.reward[i]=self.arrive_success*1000-(self.arrive_time/(1+len(self.packets_list))*log10(10+pw_sum/(cur_time+1)))#RWD7\n # self.reward[i]=self.terminate*10000+self.arrive_success*300-100*self.arrive_time/(1+len(self.packets_live_id))/len(self.packets_list)#RWD8\n # self.reward[i]=(self.terminate*10000+self.arrive_success*1000-len(self.packets_live_id))/self.node_max#RWD9\n \n # ###RWD10###\n # if self.agent_arrive[i]==0 and self.packet_arrive_success[i]==0 and self.terminate!=1 :\n # self.reward[i] = - len(self.node_list[i].packets_ToSend_id) + 10000*self.end#*(1-self.terminate) #等待时延\n # elif self.agent_arrive[i]>0 or self.packet_arrive_success[i]>0 and self.terminate!=1:\n # # self.reward[i]=1000*self.agent_arrive[i]+1000*self.packet_arrive_success[i]+10000*self.terminate\n # self.reward[i] = 1000*self.agent_arrive[i] + 10000*self.end#*(1-self.terminate) \n # elif self.terminate:\n # self.reward[i] = 2000\n # # self.reward[i] = 1000*self.agent_arrive[i] + 1000*self.packet_arrive_success[i] -len(self.node_list[i].packets_ToSend_id)\n # ###########\n \n ###RWD11###\n if self.agent_arrive[i]==0 and self.packet_arrive_success[i]==0 :\n self.reward[i] = (- len(self.node_list[i].packets_ToSend_id) - 100*trans_delay) #等待时延 + 传输时延\n elif self.agent_arrive[i]>0 or self.packet_arrive_success[i]>0 :\n self.reward[i] = 1000*self.agent_arrive[i] #+ self.para_reward[i] #本节点作为 |最后一个中间节点| 发的包成功到达数 \n ###########\n \n self.allNode_delay+=trans_delay\n self.allNode_pw+=round(pw_now,6)\n \n if len(self.packets_live_id) == 0:\n self.terminate=1\n \n \n # self.time_avg+=self.envTr_time/(1+len(self.packets_list)-len(self.packets_live_id))*(cur_time+1)\n print(\"pdr: \"+str(packet_done_rate)+\" rwd: \"+str(sum(self.reward)))\n return topology_now,self.all_ob,self.reward,self.para_reward,self.terminate\n\n \n \n def 
SNR_cac_update(self,cur_time,trans_task,current_amp_send,current_freqB,current_amp_rec):\n trans_task_temp=trans_task\n node_send_id,node_rec_id,packet_id=trans_task_temp.show_info()\n trans_energy=round(current_amp_send*current_amp_rec*self.packets_list[packet_id].size*PACKENERGY,6)\n noise=NOISE_CONST\n SINR_fB=[]\n Capacity=1\n node_range = np.linalg.norm(self.position[node_send_id]-self.position[node_rec_id])\n for fB_id in range(len(current_freqB)):\n inference_temp=noise\n if current_freqB[fB_id]:\n node_list_temp=self.freqB_list[fB_id]\n for i in node_list_temp:\n if i==node_send_id:\n continue\n elif i in self.topology_actSpace[node_rec_id][0]:\n if self.node_list[i].sending_flag==1:\n ts_ttemp=self.node_list[i].trans_task_send.get_nowait()\n oth_node_send_id,oth_node_rec_id,oth_packet_id=ts_ttemp.show_info()\n inference_temp+=round(self.node_list[oth_node_send_id].current_amp_send*RECAMP*self.packets_list[oth_packet_id].size*PACKENERGY,6)\n self.node_list[i].trans_task_send.put_nowait(ts_ttemp)#无损使用队列内容\n Sinr=round(trans_energy*(10**(-abs(self.noise_list[cur_time])))*10**(-node_range/100/COM_RANGE)/inference_temp,6)\n Capacity+=round(8*4*log2(1+Sinr),6)\n SINR_fB.append(Sinr)\n return (SINR_fB,Capacity)\n \n \n def reset(self):\n #初始化二维平面\n self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0)\n #初始化随机相邻矩阵\n self.topology = np.zeros((self.node_space_size,self.node_space_size))\n self.topology[0:self.node_max,0:self.node_max] = np.random.randint(0,2,(self.node_max,self.node_max))\n for i in range(self.node_max):\n self.topology[i,i] = 1\n for j in range(self.node_max):\n #构建双向图\n if self.topology[i,j] == 1:\n self.topology[j,i] = 1\n #初始化节点动作空间\n self.topology_actSpace=[]\n #初始化频谱块元组-----(0,[])表示(占用与否,[占用transtaskID列表]) \n self.freqB_list=([],[],[],[],[],[],[],[],[],[]) #((0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]))\n self.freqB_use_history=([],[],[],[],[],[],[],[],[],[])\n #初始化传输事件列表\n self.trans_task_ID_inTR=[]\n self.trans_task_list=[]\n self.trans_task_cnt=0 # id计数器\n #初始化包列表\n self.amount_poisson_list = np.random.poisson(lam=LAMDA,size=MAX_TIME)#包数量初始化\n self.size_normal_list = ((np.random.normal(0,1,MAX_TIME*2)*16+16)//8)*8#包大小初始化\n self.pack_use_cnt=0#包序号计数器\n self.packets_list=[]#包列表\n self.packets_live_id=[]\n #初始化节点列表\n self.node_list=[]\n self.live_node_ID_list=[]\n for i in range(self.node_max):\n locals()['node_'+str(i)] = Node(i)\n self.node_list.append(locals()['node_'+str(i)])\n self.live_node_ID_list.append(i)\n #统计参数\n self.envTr_time=0\n self.allNode_pw=0\n self.allNode_delay=0\n self.time_avg=0\n self.arrive_time=1\n # self.arrive_success=0\n self.terminate=0\n self.end=0\n self.packet_arrive_success=[]\n self.agent_arrive=[]\n for i in range(NODE_MAX):\n self.packet_arrive_success.append(0)\n self.agent_arrive.append(0)\n #四元组\n self.all_ob=np.array([[0]*OBS_LEN]*NODE_MAX)\n self.reward=np.array([1]*self.node_max) "
},
{
"identifier": "Node",
"path": "src/node.py",
"snippet": "class Node(object):\n def __init__(self,id_node):\n super(Node, self).__init__()\n #multi-agent sys setting\n self.node_max=36\n self.act_range=self.node_max-1 #最大邻居范围\n # current agent-property setting\n self.id=id_node#该节点id\n # 1 - packets\n self.packets_ToSend_id=[]#该节点当前待传的包\n self.packets_id_list=[]#该节点至今为止保存过的包id\n \n self.sending_flag=0\n self.rec_flag=0\n \n self.trans_task_send=Queue(maxsize=1)#该节点当前传输的任务\n self.trans_taskID_rec=[]#该节点当前接收的任务\n # 2 - energy\n self.current_amp_send=0#节点当前发送增益--------动作\n #self.current_amp_receive=0#节点当前接收增益--------动作\n \n self.current_power_send=0#节点当前发送功率\n self.current_power_receive=0#节点当前接收功率\n self.power_list=[]#节点使用能量记录\n \n self.energy_consumption=0#截至现在能量消耗\n # 3 - freq\n self.current_freqB=[1]#当前选用频谱块--------动作\n self.freqB_list=[1]#频谱块历史\n # 4 - topology\n self.neibor_idlist=[]\n self.next_hop_id=-1#下一条节点id--------动作\n # 5 - observation\n #self.ob_send=[]\n \n # def observation_rec(self,send_node):\n # if len(self.ob_send)==0 or len(send_node.ob_send)==0 :\n # raise ValueError(\"send observation unfinished\")\n # self.ob_rec.append(self.ob_send[-1])\n # self.ob_rec.append(send_node.ob_send[-1])\n # return self.ob_rec\n \n \n def get_send_action(self,ob,action_space):\n \n ###缺省决策###\n \n #改变属性\n return self.current_amp_send,self.current_freqB,self.next_hop_id\n \n def get_rec_action(self,ob):\n \n ###缺省决策###\n \n #改变属性\n return self.current_amp_receive "
},
{
"identifier": "Packet",
"path": "src/packet.py",
"snippet": "class Packet(object):\n def __init__(self,id_packet,packet_size,ori_node_id,dst_node_id,time_start_0):\n super(Packet, self).__init__()\n self.id=id_packet\n self.size=packet_size\n #节点属性\n self.ori_node_id=ori_node_id\n self.cur_node_id=ori_node_id\n self.dst_node_id=dst_node_id\n self.node_list=[ori_node_id]\n #T-T属性\n self.cur_trans_task_id=-100\n self.in_TR=0\n self.trans_task_IDlist=[]\n #路由属性\n self.time_start=time_start_0\n self.time_use=0\n self.arrive_flag=0\n \n def packet_trans_update(self,trans_task):\n if trans_task.trans_property[2]!=self.id:\n raise ValueError('trans_task not matched')\n self.cur_trans_task_id=trans_task.id"
},
{
"identifier": "Trans_task",
"path": "src/transtask.py",
"snippet": "class Trans_task(object):\n def __init__(self,trans_id,node_send,node_rec,packet):\n self.id=trans_id\n self.trans_property=(node_send.id,node_rec.id,packet.id)#基本属性\n self.packsize=packet.size\n ####frequency block info####\n self.FreqB_occup=node_send.current_freqB #占用频谱块id\n ####SINR and Capacity####\n self.SNR_C=([],1)#Y(SNR,Capacity)-----------------[X(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...]\n ####time of trans####\n self.time_use=1#int(self.packsize/self.SNR_C[1])+1\n self.time_cnt=0\n self.finish_flag=0\n ####energy setting####\n self.energy_property = (node_send.current_amp_send,RECAMP)\n self.energy_consume=(node_send.current_amp_send*packet.size*PACKENERGY,RECAMP*packet.size*PACKENERGY)\n self.power_consume=(round(node_send.current_amp_send*packet.size*PACKENERGY/self.time_use,6),round(RECAMP*packet.size*PACKENERGY/self.time_use,6))\n \n def show_info(self):\n return self.trans_property[0],self.trans_property[1],self.trans_property[2]\n \n def Trans_task_update(self):\n if self.finish_flag:\n return 1\n if self.time_cnt>=self.time_use:\n self.finish_flag=1\n return 1\n elif self.time_cnt<self.time_use:\n self.time_cnt+=1\n return 0\n \n \n #trans_task=tuple([],{},(node_send_id,node_send_amp,node_rec_id,node_rec_amp,packet_id),0)\n #tuple:([占用频谱块id],{(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...},(基本属性:发送节点id,发送增益,接收节点id,接收增益,包id),完成标志位)"
},
{
"identifier": "DGN",
"path": "src/DGN.py",
"snippet": "class DGN(nn.Module):\n\tdef __init__(self,n_agent,num_inputs,hidden_dim,num_actions):\n\t\tsuper(DGN, self).__init__()\n\t\t\n\t\tself.encoder = Encoder(num_inputs,hidden_dim)\n\t\tself.att_1 = AttModel(n_agent,hidden_dim,hidden_dim,hidden_dim)\n\t\t# self.att_2 = AttModel(n_agent,hidden_dim,hidden_dim,hidden_dim)\n\t\tself.q_net = Q_Net(hidden_dim,num_actions)\n\t\t\n\tdef forward(self, x, mask):\n\t\th1 = self.encoder(x)\n\t\th2 = self.att_1(h1, mask)\n\t\t# h3 = self.att_2(h2, mask)\n\t\tq = self.q_net(h2)\n\t\treturn q "
},
{
"identifier": "DPG",
"path": "src/DGN.py",
"snippet": "class DPG(nn.Module):\n def __init__(self,n_agent,din=11,hidden_dim=128,dout=11,init_w = 3e-3):\n super(DPG, self).__init__()\n self.fc1 = nn.Linear(din, hidden_dim)\n self.fc2 = nn.Linear(hidden_dim,dout) \n # uniform_将tensor用从均匀分布中抽样得到的值填充。参数初始化\n self.fc2.weight.data.uniform_(-init_w, init_w)\n # nn.init.uniform_(self.linear3.weight,(-init_w,init_w))\n #也用normal_(0, 0.1) 来初始化的,高斯分布中抽样填充,这两种都是比较有效的初始化方式\n self.fc2.bias.data.uniform_(-init_w, init_w)\n #其意义在于我们尽可能保持 每个神经元的输入和输出的方差一致。\n def forward(self, x):\n h = F.relu(self.fc1(x))\n h2 = F.relu(self.fc2(h))\n return torch.tanh(h2)"
},
{
"identifier": "ReplayBuffer",
"path": "src/buffereplay.py",
"snippet": "class ReplayBuffer(object):\n\n\tdef __init__(self, buffer_size):\n\t\tself.buffer_size = buffer_size\n\t\tself.num_experiences = 0\n\t\tself.buffer = deque()\n\n\tdef getBatch(self, batch_size):\n\t\tif self.num_experiences < batch_size:\n\t\t\treturn random.sample(self.buffer, self.num_experiences)\n\t\telse:\n\t\t\treturn random.sample(self.buffer, batch_size)\n\n\tdef add(self, obs, action, reward, new_obs, matrix, next_matrix, para_reward, terminate):\n\t\texperience = (obs, action, reward, new_obs, matrix, next_matrix, para_reward, terminate)\n\t\tif self.num_experiences < self.buffer_size:\n\t\t\tself.buffer.append(experience)\n\t\t\tself.num_experiences += 1\n\t\telse:\n\t\t\tself.buffer.popleft()\n\t\t\tself.buffer.append(experience)"
}
] | from src.env import Environment
from src.node import Node
from src.packet import Packet
from src.transtask import Trans_task
from src.DGN import DGN,DPG
from src.parameter import *
from src.buffereplay import ReplayBuffer
from queue import Queue
import math
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F | 9,342 | os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# import matplotlib.pyplot as plt
USE_CUDA = torch.cuda.is_available()
print(USE_CUDA)
| os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# import matplotlib.pyplot as plt
USE_CUDA = torch.cuda.is_available()
print(USE_CUDA)
| env=Environment() | 0 | 2023-12-30 09:35:30+00:00 | 12k |
alshubati99/BeamEye | uiElements/uiHandler 3.py | [
{
"identifier": "TkinterVideo",
"path": "uiElements/tkVideoPlayer.py",
"snippet": "class TkinterVideo(tk.Label):\n\n\tdef __init__(self, master, scaled: bool = True, consistant_frame_rate: bool = True, keep_aspect: bool = False,\n\t\t\t\t *args, **kwargs):\n\t\tsuper(TkinterVideo, self).__init__(master, *args, **kwargs)\n\n\t\tself.path = \"\"\n\t\tself._load_thread = None\n\n\t\tself._paused = True\n\t\tself._stop = True\n\n\t\tself.consistant_frame_rate = consistant_frame_rate # tries to keep the frame rate consistant by skipping over a few frames\n\n\t\tself._container = None\n\n\t\tself._current_img = None\n\t\tself._current_frame_Tk = None\n\t\tself._frame_number = 0\n\t\tself._time_stamp = 0\n\n\t\tself._current_frame_size = (0, 0)\n\n\t\tself._seek = False\n\t\tself._seek_sec = 0\n\n\t\tself._video_info = {\n\t\t\t\"duration\": 0, # duration of the video\n\t\t\t\"framerate\": 0, # frame rate of the video\n\t\t\t\"framesize\": (0, 0) # tuple containing frame height and width of the video\n\n\t\t}\n\n\t\tself.set_scaled(scaled)\n\t\tself._keep_aspect_ratio = keep_aspect\n\t\tself._resampling_method: int = Image.NEAREST\n\n\t\tself.bind(\"<<Destroy>>\", self.stop)\n\t\tself.bind(\"<<FrameGenerated>>\", self._display_frame)\n\n\tdef keep_aspect(self, keep_aspect: bool):\n\t\t\"\"\" keeps the aspect ratio when resizing the image \"\"\"\n\t\tself._keep_aspect_ratio = keep_aspect\n\n\tdef set_resampling_method(self, method: int):\n\t\t\"\"\" sets the resampling method when resizing \"\"\"\n\t\tself._resampling_method = method\n\n\tdef set_size(self, size: Tuple[int, int], keep_aspect: bool = False):\n\t\t\"\"\" sets the size of the video \"\"\"\n\t\tself.set_scaled(False, self._keep_aspect_ratio)\n\t\tself._current_frame_size = size\n\t\tself._keep_aspect_ratio = keep_aspect\n\n\tdef _resize_event(self, event):\n\n\t\tself._current_frame_size = event.width, event.height\n\n\t\tif self._paused and self._current_img and self.scaled:\n\t\t\tif self._keep_aspect_ratio:\n\t\t\t\tproxy_img = ImageOps.contain(self._current_img.copy(), self._current_frame_size)\n\n\t\t\telse:\n\t\t\t\tproxy_img = self._current_img.copy().resize(self._current_frame_size)\n\n\t\t\tself._current_imgtk = ImageTk.PhotoImage(proxy_img)\n\t\t\tself.config(image=self._current_imgtk)\n\n\tdef set_scaled(self, scaled: bool, keep_aspect: bool = False):\n\t\tself.scaled = scaled\n\t\tself._keep_aspect_ratio = keep_aspect\n\n\t\tif scaled:\n\t\t\tself.bind(\"<Configure>\", self._resize_event)\n\n\t\telse:\n\t\t\tself.unbind(\"<Configure>\")\n\t\t\tself._current_frame_size = self.video_info()[\"framesize\"]\n\n\tdef _set_frame_size(self, event=None):\n\t\t\"\"\" sets frame size to avoid unexpected resizing \"\"\"\n\n\t\tself._video_info[\"framesize\"] = (\n\t\tself._container.streams.video[0].width, self._container.streams.video[0].height)\n\n\t\tself.current_imgtk = ImageTk.PhotoImage(Image.new(\"RGBA\", self._video_info[\"framesize\"], (255, 0, 0, 0)))\n\t\tself.config(width=150, height=100, image=self.current_imgtk)\n\n\tdef _load(self, path):\n\t\t\"\"\" load's file from a thread \"\"\"\n\n\t\tcurrent_thread = threading.current_thread()\n\n\t\ttry:\n\t\t\twith av.open(path) as self._container:\n\n\t\t\t\tself._container.streams.video[0].thread_type = \"AUTO\"\n\n\t\t\t\tself._container.fast_seek = True\n\t\t\t\tself._container.discard_corrupt = True\n\n\t\t\t\tstream = self._container.streams.video[0]\n\n\t\t\t\ttry:\n\t\t\t\t\tself._video_info[\"framerate\"] = int(stream.average_rate)\n\t\t\t\t\tprint(self._video_info[\"framerate\"] , int(stream.average_rate))\n\n\t\t\t\texcept 
TypeError:\n\t\t\t\t\traise TypeError(\"Not a video file\")\n\n\t\t\t\ttry:\n\n\t\t\t\t\tself._video_info[\"duration\"] = float(stream.duration * stream.time_base)\n\t\t\t\t\tself.event_generate(\"<<Duration>>\") # duration has been found\n\n\t\t\t\texcept (TypeError, tk.TclError): # the video duration cannot be found, this can happen for mkv files\n\t\t\t\t\tpass\n\n\t\t\t\tself._frame_number = 0\n\n\t\t\t\tself._set_frame_size()\n\n\t\t\t\tself.stream_base = stream.time_base\n\n\t\t\t\ttry:\n\t\t\t\t\tself.event_generate(\"<<Loaded>>\") # generated when the video file is opened\n\n\t\t\t\texcept tk.TclError:\n\t\t\t\t\tpass\n\n\t\t\t\tnow = time.time_ns() // 1_000_000 # time in milliseconds\n\t\t\t\tthen = now\n\n\t\t\t\t#time_in_frame = (1 / self._video_info[\"framerate\"]) * 1000 # second it should play each frame\n\t\t\t\ttime_in_frame = 1000/self._video_info[\"framerate\"]\n\n\t\t\t\twhile self._load_thread == current_thread and not self._stop:\n\t\t\t\t\tif self._seek: # seek to specific second\n\t\t\t\t\t\tself._container.seek(self._seek_sec * 1000000, whence='time', backward=True,\n\t\t\t\t\t\t\t\t\t\t\t any_frame=False) # the seek time is given in av.time_base, the multiplication is to correct the frame\n\t\t\t\t\t\tself._seek = False\n\t\t\t\t\t\tself._frame_number = self._video_info[\"framerate\"] * self._seek_sec\n\n\t\t\t\t\t\tself._seek_sec = 0\n\n\t\t\t\t\tif self._paused:\n\t\t\t\t\t\ttime.sleep(0.0001) # to allow other threads to function better when its paused\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tnow = time.time()*1000 # time in milliseconds\n\t\t\t\t\tdelta = now - then # time difference between current frame and previous frame\n\t\t\t\t\tthen = now\n\n\t\t\t\t\t# print(\"Frame: \", frame.time, frame.index, self._video_info[\"framerate\"])\n\t\t\t\t\ttry:\n\t\t\t\t\t\tframe = next(self._container.decode(video=0))\n\n\t\t\t\t\t\tself._time_stamp = float(frame.pts * stream.time_base)\n\n\t\t\t\t\t\twidth = self._current_frame_size[0]\n\t\t\t\t\t\theight = self._current_frame_size[1]\n\t\t\t\t\t\tif self._keep_aspect_ratio:\n\t\t\t\t\t\t\tim_ratio = frame.width / frame.height\n\t\t\t\t\t\t\tdest_ratio = width / height\n\t\t\t\t\t\t\tif im_ratio != dest_ratio:\n\t\t\t\t\t\t\t\tif im_ratio > dest_ratio:\n\t\t\t\t\t\t\t\t\tnew_height = round(frame.height / frame.width * width)\n\t\t\t\t\t\t\t\t\theight = new_height\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tnew_width = round(frame.width / frame.height * height)\n\t\t\t\t\t\t\t\t\twidth = new_width\n\n\t\t\t\t\t\tself._current_img = frame.to_image(width=width, height=height, interpolation=\"FAST_BILINEAR\")\n\n\t\t\t\t\t\tself._frame_number += 1\n\n\t\t\t\t\t\tself.event_generate(\"<<FrameGenerated>>\")\n\n\t\t\t\t\t\tif self._frame_number % self._video_info[\"framerate\"] == 0:\n\t\t\t\t\t\t\tself.event_generate(\"<<SecondChanged>>\")\n\n\t\t\t\t\t\tif self.consistant_frame_rate:\n\t\t\t\t\t\t\t#time.sleep(max((time_in_frame - delta) / 1000, 0))\n\t\t\t\t\t\t\ttime.sleep(max((time_in_frame-delta)/520, 0))\n\t\t\t\t\t\t\t# was playing at x1.? 
the speed\n\t\t\t # compared the time it took to display 1 second of the video\n\t\t\t\t\t\t\t# with 1 real life second, found it was off by 0.519\n\t\t\t\t\t\t\t# ==> the video was playing twice as fast\n\t\t\t\t\texcept (StopIteration, av.error.EOFError, tk.TclError):\n\t\t\t\t\t\tbreak\n\n\t\t\t\ttry:\n\t\t\t\t\tself._container.close()\n\t\t\t\t# added\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\n\t\t\t# print(\"Container: \", self._container.c)\n\t\t\tif self._container:\n\t\t\t\tself._container.close()\n\t\t\t\tself._container = None\n\n\t\tfinally:\n\t\t\tself._cleanup()\n\t\t\tgc.collect()\n\n\tdef _cleanup(self):\n\t\tself._frame_number = 0\n\t\tself._paused = True\n\t\tself._stop = True\n\t\tif self._load_thread:\n\t\t\tself._load_thread = None\n\t\tif self._container:\n\t\t\tself._container.close()\n\t\t\tself._container = None\n\t\ttry:\n\t\t\tself.event_generate(\"<<Ended>>\")\n\t\texcept tk.TclError:\n\t\t\tpass\n\n\tdef load(self, path: str):\n\t\t\"\"\" loads the file from the given path \"\"\"\n\t\tself.stop()\n\t\tself.path = path\n\n\tdef stop(self):\n\t\t\"\"\" stops reading the file \"\"\"\n\t\tself._paused = True\n\t\tself._stop = True\n\t\tself._cleanup()\n\n\tdef pause(self):\n\t\t\"\"\" pauses the video file \"\"\"\n\t\tself._paused = True\n\n\tdef play(self):\n\t\t\"\"\" plays the video file \"\"\"\n\t\tself._paused = False\n\t\tself._stop = False\n\n\t\tif not self._load_thread:\n\t\t\t# print(\"loading new thread...\")\n\t\t\tself._load_thread = threading.Thread(target=self._load, args=(self.path,), daemon=True)\n\t\t\tself._load_thread.start()\n\n\tdef is_paused(self):\n\t\t\"\"\" returns if the video is paused \"\"\"\n\t\treturn self._paused\n\n\tdef video_info(self) -> Dict:\n\t\t\"\"\" returns dict containing duration, frame_rate, file\"\"\"\n\t\treturn self._video_info\n\n\tdef metadata(self) -> Dict:\n\t\t\"\"\" returns metadata if available \"\"\"\n\t\tif self._container:\n\t\t\treturn self._container.metadata\n\n\t\treturn {}\n\n\tdef current_frame_number(self) -> int:\n\t\t\"\"\" return current frame number \"\"\"\n\t\treturn self._frame_number\n\n\tdef current_duration(self) -> float:\n\t\t\"\"\" returns current playing duration in sec \"\"\"\n\t\treturn self._time_stamp\n\n\tdef current_img(self) -> Image:\n\t\t\"\"\" returns current frame image \"\"\"\n\t\treturn self._current_img\n\n\tdef _display_frame(self, event):\n\t\t\"\"\" displays the frame on the label \"\"\"\n\n\t\tif self.current_imgtk.width() == self._current_img.width and self.current_imgtk.height() == self._current_img.height:\n\t\t\tself.current_imgtk.paste(self._current_img)\n\t\telse:\n\t\t\tself.current_imgtk = ImageTk.PhotoImage(self._current_img)\n\t\tself.config(image=self.current_imgtk)\n\n\tdef seek(self, sec: int):\n\t\t\"\"\" seeks to specific time\"\"\"\n\n\t\tself._seek = True\n\t\tself._seek_sec = sec"
},
{
"identifier": "open_settings_window",
"path": "uiElements/SettingsWindow.py",
"snippet": "def open_settings_window(root=window):\n\t# settings will only be used in and by this function\n\twith open(uiElements + \"/userSettings.txt\", \"r\") as f:\n\t\tsettings = f.read()\n\t\tsettings = [line.split(\" \")[-1] for line in settings.split(\"\\n\")]\n\t\tinclude_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color, output_path = settings\n\t\toutput_path = output_path.replace(\"_SPACE_\", \" \")\n\t\tinclude_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color = int(include_labels), int(\n\t\t\tinclude_crowd), int(include_accuracy), int(pedestrian_color), int(crowd_color)\n\n\tcolor_dict = {1: \"#0094FF\", 2: \"#FF00F6\", 3: \"red\", 4: \"#FF6A00\", 5: \"yellow\", 6: \"#26FF5C\"}\n\tcolor_dict_key = 1\n\n\tdef pick_color():\n\t\tnonlocal color_dict_key\n\t\tcolor_dict_key += 1\n\t\treturn color_dict[color_dict_key - 1]\n\n\tglobal settings_opened\n\n\tsettings_opened = True\n\tsettings_window = tk.Toplevel(root, bg=\"#071F46\")\n\tsettings_window.geometry('450x600')\n\tsettings_window.title('Settings')\n\tsettings_window.iconbitmap(uiAssets + \"logo.ico\")\n\tsettings_window.configure()\n\tsettings_bg_color = \"#071F46\"\n\n\ttopy = -20\n\n\tdef increment_topy():\n\t\tnonlocal topy\n\t\ttopy += 60\n\t\treturn topy\n\n\tleft_x = 0\n\n\tdef increment_leftx():\n\t\tnonlocal left_x\n\t\tleft_x += 30\n\t\treturn left_x\n\n\ttop_font = (Lato, 18)\n\n\t# include labels\n\tdef include_labels_update():\n\t\tundo_saved()\n\t\tnonlocal include_labels\n\t\tinclude_labels += 1\n\t\tinclude_labels %= 2\n\t\tprint(include_labels, include_labels_box.get())\n\n\ttk.Label(settings_window, text='Include Labels', font=top_font, foreground=\"white\", background=\"#071F46\").place(\n\t\tx=20, y=increment_topy())\n\n\n\tinclude_labels_box = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t fg_color=settings_bg_color, border_color=\"white\", hover_color=\"white\",\n\t\t\t\t\t\t\t\t\t\t command=include_labels_update)\n\tif include_labels:\n\t\tinclude_labels_box.select()\n\telse:\n\t\tinclude_labels_box.deselect()\n\n\tinclude_labels_box.place(x=400, y=topy + 5)\n\n\t# include accuracy\n\tdef include_accuracy_update():\n\t\tundo_saved()\n\t\tnonlocal include_accuracy\n\t\tinclude_accuracy += 1\n\t\tinclude_accuracy %= 2\n\n\ttk.Label(settings_window, text='Include Accuracy', font=top_font, foreground=\"white\", background=\"#071F46\").place(\n\t\tx=20, y=increment_topy())\n\tinclude_accuracy_box = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t fg_color=settings_bg_color, border_color=\"white\", hover_color=\"white\",\n\t\t\t\t\t\t\t\t\t\t command=include_accuracy_update)\n\tif include_accuracy:\n\t\tinclude_accuracy_box.select()\n\telse:\n\t\tinclude_accuracy_box.deselect()\n\tinclude_accuracy_box.place(x=400, y=topy + 5)\n\n\t# crowd detect\n\tdef include_crowd_update():\n\t\tundo_saved()\n\t\tnonlocal include_crowd\n\t\tinclude_crowd += 1\n\t\tinclude_crowd %= 2\n\n\t# print(include_crowd)\n\n\ttk.Label(settings_window, text='Include Crowd Detection', font=top_font, foreground=\"white\",\n\t\t\t background=\"#071F46\").place(x=20, y=increment_topy())\n\tinclude_crowd_box = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\tfg_color=settings_bg_color, border_color=\"white\", hover_color=\"white\",\n\t\t\t\t\t\t\t\t\t\tcommand=include_crowd_update)\n\tif 
include_crowd:\n\t\tinclude_crowd_box.select()\n\telse:\n\t\tinclude_crowd_box.deselect()\n\n\tinclude_crowd_box.place(x=400, y=topy + 5)\n\ttopy += 10\n\t# pedestrian box color\n\tsame_color_error = tk.Label(settings_window, text='Pedestrian and Crowd colors can\\'t be similar', font=(Lato, 10),\n\t\t\t\t\t\t\t\tforeground=\"red\",\n\t\t\t\t\t\t\t\tbackground=\"#071F46\")\n\n\t# choosing colors\n\t# pedestrian box colors\n\tdef reset_pedestrian_checkboxes():\n\t\tundo_saved()\n\t\tnonlocal pd_color_checkBox_list, pedestrian_color, crowd_color, same_color_error\n\t\tsame_color_error.place_forget()\n\t\tpd_color_checkBox_list[pedestrian_color - 1].deselect()\n\t\tif all(not box.get() for box in pd_color_checkBox_list): # for no empty checkbox\n\t\t\tpd_color_checkBox_list[pedestrian_color - 1].select()\n\n\t\telse:\n\t\t\tfor box in pd_color_checkBox_list:\n\t\t\t\tprint(box.get(), end=\", \")\n\t\t\t\tif box.get():\n\t\t\t\t\ttmp = pd_color_checkBox_list.index(box) + 1\n\t\t\t\t\tif tmp == crowd_color:\n\t\t\t\t\t\tsame_color_error.place(x=20, y=560)\n\t\t\t\t\t\tpd_color_checkBox_list[tmp - 1].deselect()\n\t\t\t\t\t\tpd_color_checkBox_list[pedestrian_color - 1].select()\n\t\t\t\t\telse:\n\t\t\t\t\t\tpedestrian_color = tmp\n\n\ttk.Label(settings_window, text='Pedestrian Box Color', font=top_font, foreground=\"white\",\n\t\t\t background=\"#071F46\").place(x=20, y=increment_topy())\n\tincrement_topy()\n\n\tpd_color_checkBox_list = []\n\tpd_color_checkBox_one = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\tfg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\thover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\tcommand=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_one.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_one)\n\n\tpd_color_checkBox_two = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\tfg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\thover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\tcommand=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_two.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_two)\n\n\tpd_color_checkBox_three = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t command=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_three.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_three)\n\n\tpd_color_checkBox_four = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t command=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_four.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_four)\n\n\tpd_color_checkBox_five = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t 
command=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_five.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_five)\n\n\tpd_color_checkBox_six = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\tfg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\thover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\tcommand=reset_pedestrian_checkboxes, )\n\tpd_color_checkBox_six.place(x=increment_leftx(), y=topy)\n\tpd_color_checkBox_list.append(pd_color_checkBox_six)\n\n\tpd_color_checkBox_list[pedestrian_color - 1].select()\n\tdefault_p = False\n\n\ttopy -= 20\n\tleft_x = 0\n\tcolor_dict_key = 1\n\n\t# crowd box color\n\tdef reset_crowd_checkboxes():\n\t\tundo_saved()\n\t\tnonlocal crowd_color_checkBox_list, crowd_color, default_c, pedestrian_color, same_color_error\n\t\tsame_color_error.place_forget()\n\t\tcrowd_color_checkBox_list[crowd_color - 1].deselect()\n\t\tif all(not box.get() for box in crowd_color_checkBox_list): # for no empty checkbox\n\t\t\tcrowd_color_checkBox_list[crowd_color - 1].select()\n\n\t\telse:\n\t\t\tfor box in crowd_color_checkBox_list:\n\t\t\t\tprint(box.get(), end=\", \")\n\t\t\t\tif box.get():\n\t\t\t\t\ttmp = crowd_color_checkBox_list.index(box) + 1\n\t\t\t\t\tif tmp == pedestrian_color:\n\t\t\t\t\t\tsame_color_error.place(x=20, y=560)\n\t\t\t\t\t\tcrowd_color_checkBox_list[tmp - 1].deselect()\n\t\t\t\t\t\tcrowd_color_checkBox_list[crowd_color - 1].select()\n\t\t\t\t\telse:\n\t\t\t\t\t\tcrowd_color = tmp\n\n\ttk.Label(settings_window, text='Crowd Box Color', font=top_font, foreground=\"white\", background=\"#071F46\").place(\n\t\tx=20, y=increment_topy())\n\tincrement_topy()\n\tcrowd_color_checkBox_list = []\n\tcrowd_color_checkBox_one = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t command=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_one.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_one)\n\tcrowd_color_checkBox_two = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t command=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_two.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_two)\n\tcrowd_color_checkBox_three = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t\t command=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_three.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_three)\n\tcrowd_color_checkBox_four = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t\tfg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t\thover_color=color_dict[color_dict_key - 
1],\n\t\t\t\t\t\t\t\t\t\t\t\tcommand=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_four.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_four)\n\tcrowd_color_checkBox_five = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t\tfg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t\thover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t\tcommand=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_five.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_five)\n\tcrowd_color_checkBox_six = CTk.CTkCheckBox(settings_window, height=20, width=20, text=\"\", corner_radius=5,\n\t\t\t\t\t\t\t\t\t\t\t fg_color=pick_color(), border_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t hover_color=color_dict[color_dict_key - 1],\n\t\t\t\t\t\t\t\t\t\t\t command=reset_crowd_checkboxes)\n\tcrowd_color_checkBox_six.place(x=increment_leftx(), y=topy)\n\tcrowd_color_checkBox_list.append(crowd_color_checkBox_six)\n\n\tcrowd_color_checkBox_list[crowd_color - 1].select()\n\tdefault_c = False\n\n\t# output folder\n\tdef change_output_folder():\n\t\tundo_saved()\n\t\tnonlocal output_path, current_output_text\n\t\tsettings_window.withdraw()\n\t\toutput_path = filedialog.askdirectory()\n\t\tprint(\"got:\", output_path)\n\t\tdisp_output = output_path\n\t\tcurrent_output_text.configure(text=f'Current: {disp_output}')\n\t\tsettings_window.deiconify()\n\n\ttopy -= 20\n\tleft_x = 0\n\ttk.Label(settings_window, text='Output folder', font=top_font, foreground=\"white\", background=\"#071F46\").place(x=20,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y=increment_topy() + 20)\n\tdisplay_output = \"\\\\\" + output_path.replace(\"//\", \"\\\\\").replace('\"', \"\")\n\tcurrent_output_text = tk.Label(settings_window, text=f'Current: {display_output}', font=(Lato, 10),\n\t\t\t\t\t\t\t\t foreground=\"white\", background=\"#071F46\")\n\tcurrent_output_text.place(x=20, y=increment_topy())\n\toutput_folder_change_button = CTk.CTkButton(master=settings_window,\n\t\t\t\t\t\t\t\t\t\t\t\twidth=120,\n\t\t\t\t\t\t\t\t\t\t\t\theight=40,\n\t\t\t\t\t\t\t\t\t\t\t\tborder_width=2,\n\t\t\t\t\t\t\t\t\t\t\t\tborder_color=\"white\",\n\t\t\t\t\t\t\t\t\t\t\t\tbg_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t\t\t\tfg_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t\t\t\tcorner_radius=8,\n\t\t\t\t\t\t\t\t\t\t\t\ttext=\"Change\",\n\t\t\t\t\t\t\t\t\t\t\t\tfont=(\"Lato\", 20),\n\t\t\t\t\t\t\t\t\t\t\t\tcommand=change_output_folder\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\toutput_folder_change_button.place(x=300, y=topy - 50)\n\n\t# output_folder_change_button.place(x = )\n\n\t# save settings\n\tdef undo_saved():\n\t\tnonlocal settings_save_button\n\t\tsettings_save_button.configure(text_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t text='Save',\n\t\t\t\t\t\t\t\t\t bg_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t fg_color=\"white\",\n\t\t\t\t\t\t\t\t\t hover_color=\"#24EA3F\", )\n\n\tdef save_settings():\n\t\tnonlocal include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color, output_path, settings_save_button\n\t\toutput_path = output_path.replace(\" \", \"_SPACE_\")\n\t\tsettings = f\"labels {include_labels}\\ncrowd {include_crowd}\\naccuracy {include_accuracy}\\npedestrian_color {pedestrian_color}\\ncrowd_color {crowd_color}\\nout_dir {output_path}\"\n\t\tprint(settings)\n\t\twith open(uiElements + \"/userSettings.txt\", \"w\") as 
f:\n\t\t\tf.write(settings)\n\t\tsettings_save_button.configure(text='Saved!', fg_color=\"#24EA3F\")\n\n\tsettings_save_button = CTk.CTkButton(settings_window,\n\t\t\t\t\t\t\t\t\t\t height=40,\n\t\t\t\t\t\t\t\t\t\t width=120,\n\t\t\t\t\t\t\t\t\t\t border_width=2,\n\t\t\t\t\t\t\t\t\t\t corner_radius=8,\n\t\t\t\t\t\t\t\t\t\t border_color=\"white\",\n\t\t\t\t\t\t\t\t\t\t font=(\"Lato\", 20),\n\t\t\t\t\t\t\t\t\t\t command=save_settings,\n\t\t\t\t\t\t\t\t\t\t text_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t\t text='Save',\n\t\t\t\t\t\t\t\t\t\t bg_color=settings_bg_color,\n\t\t\t\t\t\t\t\t\t\t fg_color=\"white\",\n\t\t\t\t\t\t\t\t\t\t hover_color=\"#24EA3F\",\n\n\t\t\t\t\t\t\t\t\t\t )\n\tsettings_save_button.place(x=300, y=520)\n\ttk.Label(settings_window, text='Close without saving \\nto cancel the changes', font=(Lato, 8), foreground=\"white\",\n\t\t\t background=\"#071F46\").place(x=305, y=560)\n\n\tsettings_window.wait_window()"
},
{
"identifier": "settings_inherit_root",
"path": "uiElements/SettingsWindow.py",
"snippet": "def settings_inherit_root(root):\n\tglobal window\n\twindow = root"
}
] | import os.path
import shutil
import tkinter as tk
import customtkinter as ctk
import threading
import cv2
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
from tkinter import PhotoImage, filedialog, messagebox
from uiElements.tkVideoPlayer import TkinterVideo
from uiElements.SettingsWindow import open_settings_window, settings_inherit_root
from time import sleep
from pathlib import Path
from shutil import move
from PIL import Image, ImageTk | 9,924 | if bool(include_crowd):
current_crowd_number_off.place_forget()
max_crowd_number_off.place_forget()
current_crowd_number.place(x=480 / 1300 * video_canvas.winfo_width(),
y=600 / 750 * video_canvas.winfo_height())
current_crowd_number.configure(text_color=color_dict[crowd_color])
mc = User.crowd_count_second.index(max(User.crowd_count_second))
max_crowd_number.configure(text_color=color_dict[crowd_color])
max_crowd_number.configure(
text=f"{seconds_to_hhmmss(mc - 1 if mc > 1 else mc)} "
f"- {seconds_to_hhmmss(mc + 1 if mc > 1 else mc + 2)}"
f" ({max(User.crowd_count_second)})")
max_crowd_number.place(x=885 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
else:
current_crowd_number.place_forget()
max_crowd_number.place_forget()
current_crowd_number_off.place(x=480 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
max_crowd_number_off.place(x=885 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
current_pd_number.configure(text_color=color_dict[pedestrian_color])
mp = User.pedestrian_count_second.index(max(User.pedestrian_count_second))
max_people_number.configure(text_color=color_dict[pedestrian_color])
max_people_number.configure(
text=f"{seconds_to_hhmmss(mp - 1 if mp > 1 else mp)} - "
f"{seconds_to_hhmmss(mp + 1 if mp > 1 else mp + 2)}"
f" ({max(User.pedestrian_count_second)})")
if not video:
video = filedialog.askopenfilename()
video_player.load(video)
progress_slider.configure(to=0, from_=0)
play_pause_button["image"] = play_button_image
progress_value.set(0)
def seek(value):
vid_player.seek(int(value))
vid_player.update()
vid_player2.seek(int(value))
vid_player2.update()
video_canvas.pack(fill="both", expand=True)
current_video_canvas = video_canvas
def resize_video_canvas():
video_canvas.config(width=root.winfo_width(), height=root.winfo_height())
Lato = "Lato"
video_title_label = tk.Label(root, text='Video Title', font=("Lato", int(20 / 750 * root.winfo_width())),
foreground="white", background="#051736")
current_timestamp = ctk.CTkLabel(root, text="00:00:00", font=("Lato", int(25 / 750 * root.winfo_width())),
fg_color="#051635", bg_color="#051635",
corner_radius=8)
current_timestamp.place(x=105, y=472)
pil_image2 = Image.open(uiAssets + 'settings.png')
settings_button_image = ImageTk.PhotoImage(pil_image2)
label_a = ctk.CTkLabel(root, text="Max # of People/sec: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=225, y=550)
current_pd_number = ctk.CTkLabel(root, text="", font=("Lato", int(30 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
current_pd_number.place(x=440, y=545)
label_b = ctk.CTkLabel(root, text="Max # of Crowds/sec: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=225, y=600)
current_crowd_number = ctk.CTkLabel(root, text="", font=("Lato", int(30 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
current_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#FF8484")
label_c = ctk.CTkLabel(root, text="Max # of Crowds at: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=650, y=600)
max_crowd_number = ctk.CTkLabel(root, text="", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
current_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#FF8484")
label_d = ctk.CTkLabel(root, text="Max # of People at: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=650, y=550)
max_people_number = ctk.CTkLabel(root, text="", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
max_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#FF8484")
# settings_button_image = PhotoImage(file=uiAssets + 'settings.png')
settings_open_button = tk.Button(video_canvas,
image=settings_button_image,
border=0,
anchor='n',
background="#031027",
activebackground="#031027",
|
input_video_path = ""
thread_crowd, thread_people, threads_started = threading.Thread, threading.Thread, False
current_pd_number_color, current_crowd_number_color = None, None
parent = Path(__file__).resolve().parent
# if called from uiHandler, parent resolves to uiElements
# if called from BeamEye.py, it resolves to GP
# we need the GP\uiAssets path for UI assets
# the following block walks up to the app's root folder (GP), whatever its (new) name is,
# and appends \uiAssets\ to it
# if the parent folder isn't GP ==> a sub-folder of GP
while not os.path.isdir(str(parent) + '\\uiAssets\\'):
# go back to its parent
parent = parent.parent
GP_path = parent
uiAssets = str(GP_path) + '\\uiAssets\\'
root = tk.Tk()
root.title("BeamEye")
root.iconbitmap(uiAssets + "logo.ico")
# The UI has too many elements to manage during resizing, especially during video
# playback, so we read the screen size, base the app window on a smaller area,
# and then disable resizing.
# getting user screen size, change values to test different screen sizes
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# define window size percentage, max is 1 == screen size
resize_ratio = .75
# set the window size to 75% of the screen width
width, height = int(screen_width * resize_ratio), int((screen_width * resize_ratio) * 9 / 16)
# keeping 16:9 aspect ratio to match videos' placeholders
root.geometry(f"{int(screen_width * resize_ratio)}x{int((screen_width * resize_ratio) * 9 / 16)}")
# disable resizing
root.resizable(False, False)
root.configure(bg="black")
# root.geometry(f"{width}x{height}")
pc = "#30A8E6"
ended = False
crowd_is_included = None
progressbar, progressbar_progress, progressbar_placeholder_label = ctk.CTkProgressBar, 0, tk.Label
current_loading_canvas, current_video_canvas = tk.Canvas, tk.Canvas
# background_image_hello = PhotoImage(file=uiAssets + 'home2.png')
pedestrian_count_second, crowd_count_second = [], []
new_video = False
def set_aspect_ratio():
s_width = root.winfo_screenwidth()
s_height = root.winfo_screenheight()
# Initial aspect ratio adjustment
new_width = root.winfo_width()
new_height = int(new_width * 9 / 16)
# If height exceeds screen, adjust width based on screen height
if new_height > s_height:
new_height = s_height
new_width = int(new_height * 16 / 9)
# If width now exceeds screen, reduce both to fit within screen
if new_width > s_width:
new_width = s_width
new_height = int(new_width * 9 / 16)
# Apply the new dimensions
root.geometry(f"{new_width}x{new_height}")
def new_coordinates(old_x, old_y, old_width=None, old_height=None):
window_width, window_height = root.winfo_width(), root.winfo_height()
new_x = old_x * window_width / 1300
new_y = old_y * window_height / 750
if old_width is not None:
new_width = old_width * window_width / 1300
new_height = old_height * window_width / 750
return new_x, new_y, new_width, new_height
return new_x, new_y
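# open_hello_window draws the splash screen and animates a placeholder progress bar while the slow imports (roughly 7 seconds for TensorFlow) finish in the background.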
def open_hello_window():
global current_canvas, main_root, w, h
# upload canvas
img_ = Image.open(uiAssets + 'home2.png')
resized_image_ = img_.resize((root.winfo_width(), root.winfo_height()))
tk_image_ = ImageTk.PhotoImage(resized_image_)
background_image_hello = tk_image_
hello_canvas = tk.Canvas(root, width=root.winfo_width() - 4, height=root.winfo_width() - 10)
current_canvas = hello_canvas
hello_canvas.place(x=0, y=0)
hello_canvas.create_image(root.winfo_width() / 2, root.winfo_height() / 2, image=background_image_hello, anchor="c")
# settings in upload window
progressbar_placeholder = ctk.CTkProgressBar(master=hello_canvas, height=20,
width=400, bg_color="#041632", fg_color="#041632",
progress_color="#30A8E6", border_color="#30A8E6",
border_width=2, indeterminate_speed=0.01, mode='determinate'
)
progressbar_placeholder.place(x=root.winfo_width() / 2 - 200, y=root.winfo_height() / 2 + 60)
progressbar_placeholder.set(0)
# settings canvas
def wait_for_tenserflow_import():
sleep(1)
for _ in range(7): # takes around 7 seconds to import tensorflow
for __ in range(7): # each .step() increases the bar by 2%, 7x7x2 = 98% of the bar after 7 seconds
progressbar_placeholder.step()
sleep(1 / 7)
progressbar_placeholder.set(1) # set the bar to 100%
sleep(1)
hello_canvas.destroy()
return
threading.Thread(target=wait_for_tenserflow_import).start()
def seconds_to_hhmmss(seconds):
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return "{:02d}:{:02d}:{:02d}".format(int(hours), int(minutes), int(seconds))
def update_current_timestamp(stamp: ctk.CTkLabel, timestamp: str): # ,
stamp.configure(text=timestamp)
pass
video_end = False
current_canvas = None
main_root, w, h = None, 0, 0
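# open_video_window builds the playback screen: two video players (original input and processed output) that are played, paused and seeked together, plus per-second people/crowd statistic labels.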
def open_video_window():
global root, current_video_canvas
# threading.Thread(target=open_hello_window).start()
video_canvas = tk.Canvas(root, bg="#051735")
settings_inherit_root(root)
img = Image.open(uiAssets + 'blurred.png')
resized_image = img.resize((root.winfo_width(), root.winfo_height()))
tk_image = ImageTk.PhotoImage(resized_image)
background_image_loading = tk_image
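	# open_load_window shows the loading screen; its progress bar tracks frame extraction and processing before both players are loaded with the results.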
def open_load_window():
global progressbar, progressbar_progress, \
progressbar_placeholder_label, current_loading_canvas, \
current_video_canvas, ended, input_video_path
bg_color = "#031532"
loading_canvas = ctk.CTkCanvas(root, width=root.winfo_width() - 4, height=root.winfo_height() - 4,
bg=bg_color) # , bg="#031532")
loading_canvas.place(x=0, y=0)
current_loading_canvas = loading_canvas
loading_canvas.create_image(0, 0, image=background_image_loading, anchor="nw", )
progressbar = ctk.CTkProgressBar(master=loading_canvas, height=int(20 * root.winfo_height() / 750),
width=int(400 * root.winfo_width() / 1300), bg_color="#3C3E46",
fg_color="#4A4C51", # bg_color: for corner
# edges, fg_color inside the bar (inactive part)
progress_color="#49FF3F", border_color="#49FF3F",
border_width=2, indeterminate_speed=0.01, mode='determinate'
)
progressbar.set(0)
loading_font = ("Lato", int(40 / 750 * root.winfo_height()))
progressbar_placeholder_label = tk.Label(loading_canvas, text='Waking Up The Robot', font=loading_font,
foreground="white",
background="#031532")
canvas_width = root.winfo_width()
canvas_height = root.winfo_height()
# Calculate the position (center horizontally, 0.3 vertically)
x_p = (canvas_width - progressbar_placeholder_label.winfo_reqwidth()) / 2
y_p = canvas_height * 0.3
# Place the label using the place method
progressbar_placeholder_label.place(x=x_p, y=y_p)
p1 = tk.Label(loading_canvas, text='Extracting Video Frames...', font=loading_font, foreground="white",
background=bg_color)
p2 = tk.Label(loading_canvas, text='Processing The Frames...', font=loading_font, foreground="white",
background=bg_color)
p3 = tk.Label(loading_canvas, text='Putting The Frames Back Together', font=loading_font, foreground="white",
background=bg_color)
p4 = tk.Label(loading_canvas, text='Almost There', font=loading_font, foreground="white",
background=bg_color)
p5 = tk.Label(loading_canvas, text='All Set!', font=loading_font, foreground="#49FF3F",
background=bg_color)
progress_feedback = [p1, p2, p3, p4, p5]
def stepper(fill=False):
# full bar is 100%
# each step is 2%
global progressbar, progressbar_progress, progressbar_placeholder_label, ended
if ended:
pass
progressbar_placeholder_label.place_forget()
x_position_p = (loading_canvas.winfo_width() - progressbar.winfo_reqwidth()) / 2
progressbar.place(x=x_position_p, y=root.winfo_height() / 2 + 60)
progressbar_progress += 2
div = 100 // (len(progress_feedback) - 1)
canvas_width_here = loading_canvas.winfo_width()
canvas_height_here = loading_canvas.winfo_height()
# Calculate the position (center horizontally, 0.3 vertically)
x_position = (canvas_width_here - progress_feedback[progressbar_progress // div].winfo_reqwidth()) / 2
y_position = canvas_height_here * 0.3
# Place the label using the place method
progress_feedback[progressbar_progress // div].place(x=x_position, y=y_position)
progress_feedback[progressbar_progress // div - 1].place_forget()
progressbar.step()
if fill:
progressbar.set(1)
progressbar.place_forget()
progress_feedback[-2].place_forget()
x_position = (canvas_width_here - progress_feedback[-1].winfo_reqwidth()) / 2
y_position = canvas_height_here * 0.4
# Place the label using the place method
progress_feedback[-1].place(x=x_position, y=y_position)
ended = True
sleep(2)
progressbar_progress = 0
User.frames_progress = 0
load_video(User.input_video_path, vid_player)
load_video(User.output_video, vid_player2)
loading_canvas.destroy()
root.maxsize()
User.finished = False
return
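		# fill_pb runs on a worker thread: it polls User.frames_progress, advances the bar in 2% steps, and fills it once User.finished is set.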
def fill_pb():
global ended
sleep(1)
old_progress = 0
while User.frames_progress != 100:
for _ in range(old_progress, User.frames_progress, 2):
stepper()
old_progress = User.frames_progress
sleep(.1)
while not User.finished:
sleep(.5)
else:
stepper(fill=True)
ended = False
return
threading.Thread(target=fill_pb).start()
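	# upload_button_func asks the user for an .mp4, hands its path to the rest of the app via the shared User module, and switches to the loading screen.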
def upload_button_func():
global input_video_path, new_video, thread_people, thread_crowd, threads_started
input_video_path = filedialog.askopenfilename(initialdir=str(Path(__file__).resolve().parent.parent),
filetypes=[("Videos", "*.mp4")])
if not input_video_path:
return
nonlocal video_title_label
new_video = True
sleep(0.002)
if (thread_people is not None and thread_crowd is not None) and threads_started:
thread_people.join()
thread_crowd.join()
threads_started = False
video_title = input_video_path.split("/")[-1]
video_title_label.configure(text=video_title)
video_title_label.place(x=45, y=30)
nonlocal play_pause_button
play_pause_button.place(x=60 * root.winfo_width() / 1300, y=470 * root.winfo_height() / 750 + 10)
if input_video_path:
User.input_video_path = input_video_path
User.wait = False
sleep(0.6)
open_load_window()
def update_duration(event):
nonlocal vid_player, vid_player2
duration = vid_player.video_info()["duration"]
progress_slider["to"] = duration
def play_pause():
global new_video, thread_people, thread_crowd, threads_started
if new_video:
declare_threads()
thread_people.start()
thread_crowd.start()
new_video = False
threads_started = True
nonlocal vid_player, vid_player2
global video_end
if vid_player.is_paused() and vid_player2.is_paused():
threading.Thread(target=vid_player.play).start()
threading.Thread(target=vid_player2.play).start()
play_pause_button["image"] = pause_button_image
else:
vid_player.pause()
vid_player2.pause()
play_pause_button["image"] = play_button_image
def video_ended(event):
nonlocal vid_player, vid_player2, current_timestamp
progress_slider.set(progress_slider["to"])
play_pause_button["image"] = play_button_image
progress_slider.set(0)
current_timestamp.configure(text="00:00:00")
def update_scale(event):
global video_end
nonlocal vid_player, vid_player2
progress_value.set(int(vid_player.current_duration()))
update_current_timestamp(current_timestamp, seconds_to_hhmmss(vid_player.current_duration()))
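	# load_video re-reads userSettings.txt, updates the colors/visibility of the people and crowd count labels, then loads the file into the given player and resets the slider.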
def load_video(video: str, video_player: TkinterVideo):
nonlocal current_pd_number, current_crowd_number, \
current_crowd_number_off, max_people_number, \
max_crowd_number, max_crowd_number_off
color_dict = {1: "#0094FF", 2: "#FF00F6", 3: "red", 4: "#FF6A00", 5: "yellow",
6: "#26FF5C"} # {1: "blue", 2: "purple", 3: "red", 4: "orange", 5: "yellow", 6: "green"}
with open(str(GP_path) + "\\uiElements\\userSettings.txt", "r") as f:
settings = f.read()
settings = [line.split(" ")[-1] for line in settings.split("\n")]
include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color, output_path = settings
include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color = int(include_labels), int(
include_crowd), int(include_accuracy), int(pedestrian_color), int(crowd_color)
global crowd_is_included
crowd_is_included = include_crowd
if bool(include_crowd):
current_crowd_number_off.place_forget()
max_crowd_number_off.place_forget()
current_crowd_number.place(x=480 / 1300 * video_canvas.winfo_width(),
y=600 / 750 * video_canvas.winfo_height())
current_crowd_number.configure(text_color=color_dict[crowd_color])
mc = User.crowd_count_second.index(max(User.crowd_count_second))
max_crowd_number.configure(text_color=color_dict[crowd_color])
max_crowd_number.configure(
text=f"{seconds_to_hhmmss(mc - 1 if mc > 1 else mc)} "
f"- {seconds_to_hhmmss(mc + 1 if mc > 1 else mc + 2)}"
f" ({max(User.crowd_count_second)})")
max_crowd_number.place(x=885 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
else:
current_crowd_number.place_forget()
max_crowd_number.place_forget()
current_crowd_number_off.place(x=480 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
max_crowd_number_off.place(x=885 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
current_pd_number.configure(text_color=color_dict[pedestrian_color])
mp = User.pedestrian_count_second.index(max(User.pedestrian_count_second))
max_people_number.configure(text_color=color_dict[pedestrian_color])
max_people_number.configure(
text=f"{seconds_to_hhmmss(mp - 1 if mp > 1 else mp)} - "
f"{seconds_to_hhmmss(mp + 1 if mp > 1 else mp + 2)}"
f" ({max(User.pedestrian_count_second)})")
if not video:
video = filedialog.askopenfilename()
video_player.load(video)
progress_slider.configure(to=0, from_=0)
play_pause_button["image"] = play_button_image
progress_value.set(0)
def seek(value):
vid_player.seek(int(value))
vid_player.update()
vid_player2.seek(int(value))
vid_player2.update()
video_canvas.pack(fill="both", expand=True)
current_video_canvas = video_canvas
def resize_video_canvas():
video_canvas.config(width=root.winfo_width(), height=root.winfo_height())
Lato = "Lato"
video_title_label = tk.Label(root, text='Video Title', font=("Lato", int(20 / 750 * root.winfo_width())),
foreground="white", background="#051736")
current_timestamp = ctk.CTkLabel(root, text="00:00:00", font=("Lato", int(25 / 750 * root.winfo_width())),
fg_color="#051635", bg_color="#051635",
corner_radius=8)
current_timestamp.place(x=105, y=472)
pil_image2 = Image.open(uiAssets + 'settings.png')
settings_button_image = ImageTk.PhotoImage(pil_image2)
label_a = ctk.CTkLabel(root, text="Max # of People/sec: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=225, y=550)
current_pd_number = ctk.CTkLabel(root, text="", font=("Lato", int(30 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
current_pd_number.place(x=440, y=545)
label_b = ctk.CTkLabel(root, text="Max # of Crowds/sec: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=225, y=600)
current_crowd_number = ctk.CTkLabel(root, text="", font=("Lato", int(30 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
current_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#FF8484")
label_c = ctk.CTkLabel(root, text="Max # of Crowds at: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=650, y=600)
max_crowd_number = ctk.CTkLabel(root, text="", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
current_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#FF8484")
label_d = ctk.CTkLabel(root, text="Max # of People at: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=650, y=550)
max_people_number = ctk.CTkLabel(root, text="", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
max_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#FF8484")
# settings_button_image = PhotoImage(file=uiAssets + 'settings.png')
settings_open_button = tk.Button(video_canvas,
image=settings_button_image,
border=0,
anchor='n',
background="#031027",
activebackground="#031027", | command=open_settings_window | 1 | 2023-12-26 18:39:25+00:00 | 12k |
camenduru/MotionCtrl-hf | app.py | [
{
"identifier": "CAMERA_MOTION_MODE",
"path": "gradio_utils/camera_utils.py",
"snippet": "CAMERA_MOTION_MODE = [\"Basic Camera Poses\", \"Provided Complex Camera Poses\", \"Custom Camera Poses\"]"
},
{
"identifier": "process_camera",
"path": "gradio_utils/camera_utils.py",
"snippet": "def process_camera(camera_dict):\n # \"First A then B\", \"Both A and B\", \"Custom\"\n if camera_dict['complex'] is not None:\n with open(COMPLEX_CAMERA[camera_dict['complex']]) as f:\n RT = json.load(f) # [16, 12]\n RT = np.array(RT).reshape(-1, 3, 4)\n print(RT.shape)\n return RT\n\n\n motion_list = camera_dict['motion']\n mode = camera_dict['mode']\n speed = camera_dict['speed']\n print(len(motion_list))\n if len(motion_list) == 0:\n angle = np.array([0,0,0])\n T = np.array([0,0,0])\n RT = get_camera_motion(angle, T, speed, 16)\n\n\n elif len(motion_list) == 1:\n angle = np.array(CAMERA[motion_list[0]][\"angle\"])\n T = np.array(CAMERA[motion_list[0]][\"T\"])\n print(angle, T)\n RT = get_camera_motion(angle, T, speed, 16)\n \n \n \n elif len(motion_list) == 2:\n if mode == \"Customized Mode 1: First A then B\":\n angle = np.array(CAMERA[motion_list[0]][\"angle\"]) \n T = np.array(CAMERA[motion_list[0]][\"T\"]) \n RT_0 = get_camera_motion(angle, T, speed, 8)\n\n angle = np.array(CAMERA[motion_list[1]][\"angle\"]) \n T = np.array(CAMERA[motion_list[1]][\"T\"]) \n RT_1 = get_camera_motion(angle, T, speed, 8)\n\n RT = combine_camera_motion(RT_0, RT_1)\n\n elif mode == \"Customized Mode 2: Both A and B\":\n angle = np.array(CAMERA[motion_list[0]][\"angle\"]) + np.array(CAMERA[motion_list[1]][\"angle\"])\n T = np.array(CAMERA[motion_list[0]][\"T\"]) + np.array(CAMERA[motion_list[1]][\"T\"])\n RT = get_camera_motion(angle, T, speed, 16)\n\n\n # return RT.reshape(-1, 12)\n return RT"
},
{
"identifier": "OBJECT_MOTION_MODE",
"path": "gradio_utils/traj_utils.py",
"snippet": "OBJECT_MOTION_MODE = [\"Provided Trajectory\", \"Custom Trajectory\"]"
},
{
"identifier": "get_provided_traj",
"path": "gradio_utils/traj_utils.py",
"snippet": "def get_provided_traj(traj_name):\n traj = read_points(PROVIDED_TRAJS[traj_name])\n # xrange from 256 to 1024\n traj = [[int(1024*x/256), int(1024*y/256)] for x,y in traj]\n return traj"
},
{
"identifier": "process_points",
"path": "gradio_utils/traj_utils.py",
"snippet": "def process_points(points):\n frames = 16\n defualt_points = [[512,512]]*16\n\n if len(points) < 2:\n return defualt_points\n elif len(points) >= frames:\n skip = len(points)//frames\n return points[::skip][:15] + points[-1:]\n else:\n insert_num = frames - len(points)\n insert_num_dict = {}\n interval = len(points) - 1\n n = insert_num // interval\n m = insert_num % interval\n for i in range(interval):\n insert_num_dict[i] = n\n for i in range(m):\n insert_num_dict[i] += 1\n\n res = []\n for i in range(interval):\n insert_points = []\n x0,y0 = points[i]\n x1,y1 = points[i+1]\n\n delta_x = x1 - x0\n delta_y = y1 - y0\n for j in range(insert_num_dict[i]):\n x = x0 + (j+1)/(insert_num_dict[i]+1)*delta_x\n y = y0 + (j+1)/(insert_num_dict[i]+1)*delta_y\n insert_points.append([int(x), int(y)])\n\n res += points[i:i+1] + insert_points\n res += points[-1:]\n return res"
},
{
"identifier": "process_traj",
"path": "gradio_utils/traj_utils.py",
"snippet": "def process_traj(points, device='cpu'):\n xy_range = 1024\n points = process_points(points)\n points = [[int(256*x/xy_range), int(256*y/xy_range)] for x,y in points]\n \n optical_flow = get_flow(points)\n # optical_flow = torch.tensor(optical_flow).to(device)\n\n return optical_flow"
},
{
"identifier": "vis_camera",
"path": "gradio_utils/utils.py",
"snippet": "def vis_camera(RT_list, rescale_T=1):\n fig = go.Figure()\n showticklabels = True\n visible = True\n scene_bounds = 2\n base_radius = 2.5\n zoom_scale = 1.5\n fov_deg = 50.0\n \n edges = [(0, 1), (0, 2), (0, 3), (1, 2), (2, 3), (3, 1), (3, 4)] \n \n colors = px.colors.qualitative.Plotly\n \n cone_list = []\n n = len(RT_list)\n for i, RT in enumerate(RT_list):\n R = RT[:,:3]\n T = RT[:,-1]/rescale_T\n cone = calc_cam_cone_pts_3d(R, T, fov_deg)\n cone_list.append((cone, (i*1/n, \"green\"), f\"view_{i}\"))\n\n \n for (cone, clr, legend) in cone_list:\n for (i, edge) in enumerate(edges):\n (x1, x2) = (cone[edge[0], 0], cone[edge[1], 0])\n (y1, y2) = (cone[edge[0], 1], cone[edge[1], 1])\n (z1, z2) = (cone[edge[0], 2], cone[edge[1], 2])\n fig.add_trace(go.Scatter3d(\n x=[x1, x2], y=[y1, y2], z=[z1, z2], mode='lines',\n line=dict(color=clr, width=3),\n name=legend, showlegend=(i == 0))) \n fig.update_layout(\n height=500,\n autosize=True,\n # hovermode=False,\n margin=go.layout.Margin(l=0, r=0, b=0, t=0),\n \n showlegend=True,\n legend=dict(\n yanchor='bottom',\n y=0.01,\n xanchor='right',\n x=0.99,\n ),\n scene=dict(\n aspectmode='manual',\n aspectratio=dict(x=1, y=1, z=1.0),\n camera=dict(\n center=dict(x=0.0, y=0.0, z=0.0),\n up=dict(x=0.0, y=-1.0, z=0.0),\n eye=dict(x=scene_bounds/2, y=-scene_bounds/2, z=-scene_bounds/2),\n ),\n\n xaxis=dict(\n range=[-scene_bounds, scene_bounds],\n showticklabels=showticklabels,\n visible=visible,\n ),\n \n \n yaxis=dict(\n range=[-scene_bounds, scene_bounds],\n showticklabels=showticklabels,\n visible=visible,\n ),\n \n \n zaxis=dict(\n range=[-scene_bounds, scene_bounds],\n showticklabels=showticklabels,\n visible=visible,\n )\n ))\n return fig"
},
{
"identifier": "DDIMSampler",
"path": "lvdm/models/samplers/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.counter = 0\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n schedule_verbose=False,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n \n # check condition bs\n if conditioning is not None:\n if isinstance(conditioning, dict):\n try:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n except:\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=schedule_verbose)\n \n # make shape\n if len(shape) == 3:\n C, H, W = shape\n size = (batch_size, C, H, W)\n elif len(shape) == 4:\n C, T, H, W = shape\n size = (batch_size, C, T, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n \n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n verbose=verbose,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,\n **kwargs):\n device = self.model.betas.device \n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n \n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n \n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n if verbose:\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n else:\n iterator = time_range\n\n clean_cond = kwargs.pop(\"clean_cond\", False)\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n # use mask to blend noised original latent (img_orig) & new sampled latent (img)\n if mask is not None:\n assert x0 is not None\n if clean_cond:\n img_orig = x0\n else:\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>\n img = img_orig * mask + (1. 
- mask) * img # keep original & modify use img\n \n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n \n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n uc_type=None, conditional_guidance_scale_temporal=None, **kwargs):\n b, *_, device = *x.shape, x.device\n if x.dim() == 5:\n is_video = True\n else:\n is_video = False\n # f=open('/apdcephfs_cq2/share_1290939/yingqinghe/code/LVDM-private/cfg_range_s5noclamp.txt','a')\n # print(f't={t}, model input, min={torch.min(x)}, max={torch.max(x)}',file=f)\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs) # unet denoiser\n else:\n # with unconditional condition\n if isinstance(c, torch.Tensor):\n un_kwargs = kwargs.copy()\n if isinstance(unconditional_conditioning, dict):\n for uk, uv in unconditional_conditioning.items():\n if uk in un_kwargs:\n un_kwargs[uk] = uv\n unconditional_conditioning = unconditional_conditioning['uc']\n if 'cond_T' in kwargs and t < kwargs['cond_T']:\n if 'features_adapter' in kwargs:\n kwargs.pop('features_adapter')\n un_kwargs.pop('features_adapter')\n # kwargs['features_adapter'] = None\n # un_kwargs['features_adapter'] = None\n # if 'pose_emb' in kwargs:\n # kwargs.pop('pose_emb')\n # un_kwargs.pop('pose_emb')\n # kwargs['pose_emb'] = None\n # un_kwargs['pose_emb'] = None\n e_t = self.model.apply_model(x, t, c, **kwargs)\n # e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **un_kwargs)\n elif isinstance(c, dict):\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n else:\n raise NotImplementedError\n # text cfg\n if uc_type is None:\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n else:\n if uc_type == 'cfg_original':\n e_t = e_t + unconditional_guidance_scale * (e_t - e_t_uncond)\n elif uc_type == 'cfg_ours':\n e_t = e_t + unconditional_guidance_scale * (e_t_uncond - e_t)\n else:\n raise NotImplementedError\n # temporal guidance\n if conditional_guidance_scale_temporal is not None:\n e_t_temporal = self.model.apply_model(x, t, c, **kwargs)\n e_t_image = self.model.apply_model(x, t, c, no_temporal_attn=True, **kwargs)\n e_t = e_t + conditional_guidance_scale_temporal * (e_t_temporal - e_t_image)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps 
else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n \n if is_video:\n size = (b, 1, 1, 1, 1)\n else:\n size = (b, 1, 1, 1)\n a_t = torch.full(size, alphas[index], device=device)\n a_prev = torch.full(size, alphas_prev[index], device=device)\n sigma_t = torch.full(size, sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n # print(f't={t}, pred_x0, min={torch.min(pred_x0)}, max={torch.max(pred_x0)}',file=f)\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n # # norm pred_x0\n # p=2\n # s=()\n # pred_x0 = pred_x0 - torch.max(torch.abs(pred_x0))\n\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n \n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0"
},
{
"identifier": "DEFAULT_NEGATIVE_PROMPT",
"path": "main/evaluation/motionctrl_inference.py",
"snippet": "DEFAULT_NEGATIVE_PROMPT = 'blur, haze, deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, '\\\n 'sketch, cartoon, drawing, anime, mutated hands and fingers, deformed, distorted, '\\\n 'disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, '\\\n 'floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation'\n RT = camera_poses[..., None]\n RT = None\ndef load_model_checkpoint(model, ckpt, adapter_ckpt=None):\ndef load_trajs(cond_dir, trajs):\ndef load_camera_pose(cond_dir, camera_poses):\ndef save_results(samples, filename, savedir, fps=10):\ndef motionctrl_sample(\n model, \n prompts, \n noise_shape,\n camera_poses=None, \n trajs=None,\n n_samples=1,\n unconditional_guidance_scale=1.0,\n unconditional_guidance_scale_temporal=None,\n ddim_steps=50,\n ddim_eta=1.,\n **kwargs):\ndef run_inference(args, gpu_num, gpu_no):\ndef save_images(samples, savedir):\ndef get_parser():"
},
{
"identifier": "instantiate_from_config",
"path": "utils/utils.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
}
] | import argparse
import os
import tempfile
import cv2
import gradio as gr
import imageio
import numpy as np
import torch
import torchvision
from functools import partial
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from gradio_utils.camera_utils import CAMERA_MOTION_MODE, process_camera
from gradio_utils.traj_utils import (OBJECT_MOTION_MODE, get_provided_traj,
process_points, process_traj)
from gradio_utils.utils import vis_camera
from lvdm.models.samplers.ddim import DDIMSampler
from main.evaluation.motionctrl_inference import (DEFAULT_NEGATIVE_PROMPT,
load_model_checkpoint,
post_prompt)
from utils.utils import instantiate_from_config | 7,950 | BASE_MODEL = ['LVDM/VideoCrafter', 'AnimateDiff', 'SVD']
traj_list = []
camera_dict = {
"motion":[],
"mode": "Customized Mode 1: First A then B", # "First A then B", "Both A and B", "Custom"
"speed": 1.0,
"complex": None
}
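# Visualize the selected camera poses with Plotly and toggle which UI controls are shown for the current mode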
def fn_vis_camera(info_mode):
global camera_dict
RT = process_camera(camera_dict) # [t, 3, 4]
if camera_dict['complex'] is not None:
# rescale T to [-2,2]
for i in range(3):
min_T = np.min(RT[:,i,-1])
max_T = np.max(RT[:,i,-1])
if min_T < -2 or max_T > 2:
RT[:,i,-1] = RT[:,i,-1] - min_T
RT[:,i,-1] = RT[:,i,-1] / (np.max(RT[:,:,-1]) + 1e-6)
RT[:,i,-1] = RT[:,i,-1] * 4
RT[:,i,-1] = RT[:,i,-1] - 2
fig = vis_camera(RT)
if info_mode == MODE[0]:
vis_step3_prompt_generate = True
vis_prompt = True
vis_num_samples = True
vis_seed = True
vis_start = True
vis_gen_video = True
vis_object_mode = False
vis_object_info = False
else:
vis_step3_prompt_generate = False
vis_prompt = False
vis_num_samples = False
vis_seed = False
vis_start = False
vis_gen_video = False
vis_object_mode = True
vis_object_info = True
return fig, \
gr.update(visible=vis_object_mode), \
gr.update(visible=vis_object_info), \
gr.update(visible=vis_step3_prompt_generate), \
gr.update(visible=vis_prompt), \
gr.update(visible=vis_num_samples), \
gr.update(visible=vis_seed), \
gr.update(visible=vis_start), \
gr.update(visible=vis_gen_video, value=None)
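# Render the drawn object trajectory as a short preview video and reveal the prompt/generation controls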
def fn_vis_traj():
global traj_list
xy_range = 1024
points = process_points(traj_list)
imgs = []
for idx in range(16):
bg_img = np.ones((1024, 1024, 3), dtype=np.uint8) * 255
for i in range(15):
p = points[i]
p1 = points[i+1]
cv2.line(bg_img, p, p1, (255, 0, 0), 2)
if i == idx:
cv2.circle(bg_img, p, 2, (0, 255, 0), 20)
if idx==(15):
cv2.circle(bg_img, points[-1], 2, (0, 255, 0), 20)
imgs.append(bg_img.astype(np.uint8))
# size = (512, 512)
fps = 10
path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
writer = imageio.get_writer(path, format='mp4', mode='I', fps=fps)
for img in imgs:
writer.append_data(img)
writer.close()
vis_step3_prompt_generate = True
vis_prompt = True
vis_num_samples = True
vis_seed = True
vis_start = True
vis_gen_video = True
return path, gr.update(visible=vis_step3_prompt_generate), \
gr.update(visible=vis_prompt), \
gr.update(visible=vis_num_samples), \
gr.update(visible=vis_seed), \
gr.update(visible=vis_start), \
gr.update(visible=vis_gen_video, value=None)
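# Summarize the current camera settings (complex pose, motions, speed, mode) as a display string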
def display_camera_info(camera_dict, camera_mode=None):
if camera_dict['complex'] is not None:
res = f"complex : {camera_dict['complex']}. "
else:
res = ""
res += f"motion : {[_ for _ in camera_dict['motion']]}. "
res += f"speed : {camera_dict['speed']}. "
if camera_mode == CAMERA_MOTION_MODE[2]:
res += f"mode : {camera_dict['mode']}. "
return res
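# Append the clicked canvas coordinate to the global trajectory list and echo it as text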
def add_traj_point(evt: gr.SelectData, ):
global traj_list
traj_list.append(evt.index)
traj_str = [f"{traj}" for traj in traj_list]
return ", ".join(traj_str)
def add_provided_traj(traj_name):
global traj_list
|
os.environ['KMP_DUPLICATE_LIB_OK']='True'
#### Description ####
title = r"""<h1 align="center">MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</h1>"""
description = r"""
<b>Official Gradio demo</b> for <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'><b>MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</b></a>.<br>
🔥 MotionCtrl is capable of independently and flexibly controlling the camera motion and object motion of a generated video, with only a unified model.<br>
🤗 Try to control the motion of the generated videos yourself!<br>
❗❗❗ Please note that the current version of **MotionCtrl** is deployed on **LVDM/VideoCrafter**. The versions deployed on **AnimateDiff** and **SVD** will be released soon.<br>
"""
article = r"""
If MotionCtrl is helpful, please help to ⭐ the <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'>Github Repo</a>. Thanks!
[](https://github.com/TencentARC/MotionCtrl)
---
📝 **Citation**
<br>
If our work is useful for your research, please consider citing:
```bibtex
@inproceedings{wang2023motionctrl,
title={MotionCtrl: A Unified and Flexible Motion Controller for Video Generation},
author={Wang, Zhouxia and Yuan, Ziyang and Wang, Xintao and Chen, Tianshui and Xia, Menghan and Luo, Ping and Shan, Ying},
booktitle={arXiv preprint arXiv:2312.03641},
year={2023}
}
```
📧 **Contact**
<br>
If you have any questions, please feel free to reach out to me at <b>[email protected]</b>.
"""
css = """
.gradio-container {width: 85% !important}
.gr-monochrome-group {border-radius: 5px !important; border: revert-layer !important; border-width: 2px !important; color: black !important;}
span.svelte-s1r2yt {font-size: 17px !important; font-weight: bold !important; color: #d30f2f !important;}
button {border-radius: 8px !important;}
.add_button {background-color: #4CAF50 !important;}
.remove_button {background-color: #f44336 !important;}
.clear_button {background-color: gray !important;}
.mask_button_group {gap: 10px !important;}
.video {height: 300px !important;}
.image {height: 300px !important;}
.video .wrap.svelte-lcpz3o {display: flex !important; align-items: center !important; justify-content: center !important;}
.video .wrap.svelte-lcpz3o > :first-child {height: 100% !important;}
.margin_center {width: 50% !important; margin: auto !important;}
.jc_center {justify-content: center !important;}
"""
T_base = [
    [1.,0.,0.],     ## W2C +x direction: camera moves left
    [-1.,0.,0.],    ## W2C -x direction: camera moves right
    [0., 1., 0.],   ## W2C +y direction: camera moves up
    [0.,-1.,0.],    ## W2C -y direction: camera moves down
    [0.,0.,1.],     ## W2C +z direction: zoom out
    [0.,0.,-1.],    ## W2C -z direction: zoom in
]
radius = 1
n = 16
# step =
look_at = np.array([0, 0, 0.8]).reshape(3,1)
# look_at = np.array([0, 0, 0.2]).reshape(3,1)
T_list = []
base_R = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
res = []
res_forsave = []
T_range = 1.8
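# Build a default 16-step camera path translating along +z for the initial pose visualization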
for i in range(0, 16):
# theta = (1)*np.pi*i/n
R = base_R[:,:3]
T = np.array([0.,0.,1.]).reshape(3,1) * (i/n)*2
RT = np.concatenate([R,T], axis=1)
res.append(RT)
fig = vis_camera(res)
# MODE = ["camera motion control", "object motion control", "camera + object motion control"]
MODE = ["control camera poses", "control object trajectory", "control both camera and object motion"]
BASE_MODEL = ['LVDM/VideoCrafter', 'AnimateDiff', 'SVD']
traj_list = []
camera_dict = {
"motion":[],
"mode": "Customized Mode 1: First A then B", # "First A then B", "Both A and B", "Custom"
"speed": 1.0,
"complex": None
}
def fn_vis_camera(info_mode):
global camera_dict
RT = process_camera(camera_dict) # [t, 3, 4]
if camera_dict['complex'] is not None:
# rescale T to [-2,2]
for i in range(3):
min_T = np.min(RT[:,i,-1])
max_T = np.max(RT[:,i,-1])
if min_T < -2 or max_T > 2:
RT[:,i,-1] = RT[:,i,-1] - min_T
RT[:,i,-1] = RT[:,i,-1] / (np.max(RT[:,:,-1]) + 1e-6)
RT[:,i,-1] = RT[:,i,-1] * 4
RT[:,i,-1] = RT[:,i,-1] - 2
fig = vis_camera(RT)
if info_mode == MODE[0]:
vis_step3_prompt_generate = True
vis_prompt = True
vis_num_samples = True
vis_seed = True
vis_start = True
vis_gen_video = True
vis_object_mode = False
vis_object_info = False
else:
vis_step3_prompt_generate = False
vis_prompt = False
vis_num_samples = False
vis_seed = False
vis_start = False
vis_gen_video = False
vis_object_mode = True
vis_object_info = True
return fig, \
gr.update(visible=vis_object_mode), \
gr.update(visible=vis_object_info), \
gr.update(visible=vis_step3_prompt_generate), \
gr.update(visible=vis_prompt), \
gr.update(visible=vis_num_samples), \
gr.update(visible=vis_seed), \
gr.update(visible=vis_start), \
gr.update(visible=vis_gen_video, value=None)
def fn_vis_traj():
global traj_list
xy_range = 1024
points = process_points(traj_list)
imgs = []
for idx in range(16):
bg_img = np.ones((1024, 1024, 3), dtype=np.uint8) * 255
for i in range(15):
p = points[i]
p1 = points[i+1]
cv2.line(bg_img, p, p1, (255, 0, 0), 2)
if i == idx:
cv2.circle(bg_img, p, 2, (0, 255, 0), 20)
if idx==(15):
cv2.circle(bg_img, points[-1], 2, (0, 255, 0), 20)
imgs.append(bg_img.astype(np.uint8))
# size = (512, 512)
fps = 10
path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
writer = imageio.get_writer(path, format='mp4', mode='I', fps=fps)
for img in imgs:
writer.append_data(img)
writer.close()
vis_step3_prompt_generate = True
vis_prompt = True
vis_num_samples = True
vis_seed = True
vis_start = True
vis_gen_video = True
return path, gr.update(visible=vis_step3_prompt_generate), \
gr.update(visible=vis_prompt), \
gr.update(visible=vis_num_samples), \
gr.update(visible=vis_seed), \
gr.update(visible=vis_start), \
gr.update(visible=vis_gen_video, value=None)
def display_camera_info(camera_dict, camera_mode=None):
if camera_dict['complex'] is not None:
res = f"complex : {camera_dict['complex']}. "
else:
res = ""
res += f"motion : {[_ for _ in camera_dict['motion']]}. "
res += f"speed : {camera_dict['speed']}. "
if camera_mode == CAMERA_MOTION_MODE[2]:
res += f"mode : {camera_dict['mode']}. "
return res
def add_traj_point(evt: gr.SelectData, ):
global traj_list
traj_list.append(evt.index)
traj_str = [f"{traj}" for traj in traj_list]
return ", ".join(traj_str)
def add_provided_traj(traj_name):
global traj_list | traj_list = get_provided_traj(traj_name) | 3 | 2023-12-27 19:32:03+00:00 | 12k |
0x00wolf/hkrsAI | hkrsai.py | [
{
"identifier": "fetch_args",
"path": "src/args.py",
"snippet": "def fetch_args():\n \"\"\"Function to handle command-line arguments\"\"\"\n p = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n prog='hkrsAI.v2',\n description=DESCRIPTION,\n epilog=MORE_INFO\n )\n p.add_argument('--system-prompt', '-sp', required=False, default=None, dest='system_prompt',\n help=SYSTEM_PROMPT),\n p.add_argument('--model', '-m', default='gpt-3.5-turbo', type=str, choices=['gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo'],\n help=MODEL),\n p.add_argument('--temperature', '-t', type=float, default=0.7, # min=0, max=2.0,\n help=TEMPERATURE),\n p.add_argument('--frequency-penalty', '-fp', type=float, default=0, # min=-2.0, max=2.0,\n help=FREQUENCY_PENALTY),\n p.add_argument('--presence-penalty', '-pp', type=float, default=0, # min=-2.0, max=2.0,\n help=PRESENCE_PENALTY),\n p.add_argument('--top-p', type=float, default=1.0, help=TOP_P), # min=0.1, max=1.0,\n # todo: p.add_argument('-st', '--stop', default=[], nargs='*', help=variables.stop),\n p.add_argument('--max-tokens', '-mt', type=int, default=1000, help=MAX_TOKENS),\n p.add_argument('-n', type=int, default=1, help=N),\n p.add_argument('--log-level', '-ll', default=2, type=int, help=LOG_LEVEL),\n p.add_argument('--log-format', '-lf', default='json', type=str, help=LOG_FORMAT)\n return p.parse_args()"
},
{
"identifier": "PathFinder",
"path": "src/pathfinder.py",
"snippet": "class PathFinder:\n \"\"\"Class that returns an object with necessary paths for runtime operations\"\"\"\n def __init__(self, cwd: str):\n self.cwd = cwd\n self.config = f'{self.cwd}/config.json'\n self.logs = f'{self.cwd}/logs'\n self.prompts = f'{self.cwd}/prompts'\n self._first_runtime()\n self._prompts_dir_exists()\n\n @staticmethod\n def _get_cwd():\n \"\"\"Fetch the current working directory\"\"\"\n abs_path = os.path.abspath(__file__)\n cwd = os.path.dirname(abs_path)\n return cwd\n\n def _first_runtime(self):\n \"\"\"Initialize the config.json and logs directory if not present at runtime.\"\"\"\n self._init_cfg_json()\n self._init_logs_dir()\n\n def _prompts_dir_exists(self):\n \"\"\"Check to see if the prompts directory is present, or print an error and exit.\"\"\"\n if not os.path.exists(self.prompts):\n print('[*] error: prompts directory is missing')\n sys.exit()\n\n def _init_cfg_json(self):\n \"\"\"Generate the config.json file.\"\"\"\n if not os.path.exists(self.config):\n self._dump(CONFIG_INIT, self.config)\n\n def _init_logs_dir(self):\n \"\"\"Generate the logs directory\"\"\"\n if not os.path.exists(self.logs):\n os.makedirs(self.logs)\n\n @staticmethod\n def _dump(json_dict, json_file):\n \"\"\"Dumps a JSON object to a file\"\"\"\n with open(json_file, 'w') as f:\n json.dump(json_dict, f, indent=6)"
},
{
"identifier": "Client",
"path": "src/client.py",
"snippet": "class Client:\n \"\"\"A class representing the OpenAI API Client\"\"\"\n def __init__(self, config):\n self.client = None\n self.api_key = ''\n self.config = config\n\n def initialize(self):\n \"\"\"Checks config.json for a stored API key, or prompts the user to input a new key\"\"\"\n config_data = self._json_load(self.config)\n api_key = config_data['api_key']\n if api_key:\n good_key = self.test_key(api_key)\n if good_key:\n self.api_key = api_key\n self.client = openai.OpenAI(api_key=self.api_key)\n else:\n self.set_key()\n else:\n self.set_key()\n\n @staticmethod\n def test_key(api_key):\n \"\"\"Send a test message to the GPT API to check if an API key is valid\"\"\"\n client = openai.OpenAI(api_key=api_key)\n try:\n try:\n response = client.chat.completions.create(\n model='gpt-3.5-turbo',\n max_tokens=5,\n messages=[{'role': 'user', 'content': 'This is a test .'}])\n except openai.AuthenticationError:\n print('[*] error, invalid API key')\n return False\n else:\n print('[*] API key verified')\n return True\n except openai.APIConnectionError:\n print('[*] network connection error\\n[*] exiting')\n sys.exit()\n\n def set_key(self):\n \"\"\"Set a new API key and test if it is valid\"\"\"\n while True:\n self.api_key = input('[*] insert OpenAI API key:\\n>')\n valid_key = self.test_key(self.api_key)\n if valid_key:\n config_data = self._json_load(self.config)\n config_data['api_key'] = self.api_key\n self._json_dump(config_data, self.config)\n self.client = openai.OpenAI(api_key=self.api_key)\n return\n\n @staticmethod\n def _json_load(json_file):\n \"\"\"Loads JSON object from a file\"\"\"\n with open(json_file, 'r') as f:\n data = json.load(f)\n return data\n\n @staticmethod\n def _json_dump(json_dict, json_file):\n \"\"\"Dumps a JSON object to a file\"\"\"\n with open(json_file, 'w') as f:\n json.dump(json_dict, f, indent=6)"
},
{
"identifier": "GPT",
"path": "src/gpt.py",
"snippet": "class GPT:\n def __init__(self, client, model, temperature, top_p, n, frequency_penalty, presence_penalty, max_tokens):\n self.client = client\n self.model = model\n self.temperature = temperature\n self.top_p = top_p\n self.n = n\n self.frequency_penalty = frequency_penalty\n self.presence_penalty = presence_penalty\n self.max_tokens = max_tokens\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, new_value: str):\n new_value = str(new_value)\n if new_value == 'gpt-3.5-turbo' or new_value == 'gpt-4':\n self._model = new_value\n else:\n raise ValueError(f'\\n{BAD_MODEL.format(new_value)}')\n\n @property\n def temperature(self):\n return self._temperature\n\n @temperature.setter\n def temperature(self, new_value: float):\n new_value = float(new_value)\n if not (0.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_TEMP.format(new_value)}')\n else:\n self._temperature = new_value\n\n @property\n def top_p(self):\n return self._top_p\n\n @top_p.setter\n def top_p(self, new_value: float):\n new_value = float(new_value)\n if not (0 <= new_value <= 1.0):\n raise ValueError(f'\\n{BAD_TP.format(new_value)}')\n else:\n self._top_p = new_value\n\n @property\n def frequency_penalty(self):\n return self._frequency_penalty\n\n @frequency_penalty.setter\n def frequency_penalty(self, new_value: float):\n new_value = float(new_value)\n if not (-2.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_FP.format(new_value)}')\n else:\n self._frequency_penalty = new_value\n\n @property\n def presence_penalty(self):\n return self._presence_penalty\n\n @presence_penalty.setter\n def presence_penalty(self, new_value: float):\n new_value = float(new_value)\n if not (-2.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_PP.format(new_value)}')\n else:\n self._presence_penalty = new_value\n\n @property\n def n(self):\n return self._n\n\n @n.setter\n def n(self, new_value):\n new_value = int(new_value)\n if not (1 <= new_value <= 20):\n raise ValueError(f'\\n{BAD_N.format(new_value)}')\n else:\n self._n = new_value\n\n @property\n def max_tokens(self):\n return self._max_tokens\n\n @max_tokens.setter\n def max_tokens(self, new_value: int):\n new_value = int(new_value)\n if not (1 <= new_value <= 4096):\n raise ValueError(f'\\n{BAD_MT.format(new_value)}')\n else:\n self._max_tokens = new_value"
},
{
"identifier": "SystemPrompt",
"path": "src/systemprompt.py",
"snippet": "class SystemPrompt:\n \"\"\"A class that manages setting the system prompt used to define AI assistants. \\\n To add a new system prompt that will be selectable from the runtime menu, \\\n copy the prompt to an extensionless file in the appropriate category folder.\"\"\"\n def __init__(self, prompts_dir, path=''):\n self.dir = prompts_dir\n self.path = path\n self.content = ''\n self.title = 'custom'\n self._start()\n\n def _start(self):\n \"\"\"Allow the user to define a custom prompt, or select one of the pre-made options\"\"\"\n if not self.path:\n self.content = input(\"\\n[*] input a custom system prompt, \\\n \\n[*] hit enter to view preexisting options:\\n>\")\n if not self.content:\n self._set()\n else:\n self.content = self._fetch_contents(self.path)\n self.title = self.path.rpartition('/')[-1]\n\n def _set(self):\n \"\"\"Loop that runs until a prompt has been selected\"\"\"\n while True:\n category = self._select_category()\n title = self._select_prompt(category)\n if title == 'back':\n pass\n else:\n self.path = f'{self.dir}/{category}/{title}'\n prompt = self._fetch_contents(self.path)\n print(f'\\n{prompt}\\n')\n set_prompt = input(\"[*] select prompt\\n\\n[-] 'enter' to accept\\n[-] 'n' to go back\\n\"\n \"[-] 'x' to enter a custom font'\\n>\")\n if set_prompt == 'x':\n return SystemPrompt(prompts_dir=self.dir)\n elif set_prompt == 'n':\n pass\n else:\n self.title = self.path.rpartition('/')[-1]\n self.content = prompt\n print(f'[*] system prompt: {self.title}\\n[*] query AI:')\n return\n\n def _select_category(self):\n \"\"\"Select a system prompt category from the pre-made options\"\"\"\n print('\\n[-] categories\\n')\n categories = self._fetch_from(self.dir)\n categories.sort()\n choice = self._make_choice(categories)\n print(f'\\n[*] category: {choice}')\n return choice\n\n def _select_prompt(self, category):\n \"\"\"Select a pre-made system prompt from a particular category\"\"\"\n print('[-] prompts\\n')\n category = f'{self.dir}/{category}'\n system_prompts = self._fetch_from(category)\n system_prompts.sort()\n self.path = self._make_choice(system_prompts, go_back=True)\n return self.path\n\n def _make_choice(self, options_list, go_back=False):\n \"\"\"Provides the user with the ability to select a prompt from an enumerated options list\"\"\"\n # Select from a list of options by the objects enumerated position\n while True:\n try:\n self._enumerate_list(options_list, go_back)\n selection = input('\\n[*] select by position:\\n>')\n selection = int(selection)\n if 1 <= selection <= len(options_list):\n return options_list[selection - 1]\n elif go_back and selection == len(options_list) + 1:\n return 'back'\n except ValueError:\n print('[*] invalid selection')\n\n @staticmethod\n def _enumerate_list(options_list, go_back=False):\n \"\"\"\"Enumerates a list of options\"\"\"\n for x, _item in enumerate(options_list, 1):\n print(f'{x}. {_item}')\n if go_back:\n print(f'{x + 1}. back')\n\n @staticmethod\n def _fetch_contents(file_path):\n \"\"\"Fetches the contents of a file\"\"\"\n try:\n with open(file_path, 'r') as f:\n return f.read()\n except FileNotFoundError:\n pass\n\n @staticmethod\n def _fetch_from(root_dir):\n \"\"\"Returns a list containing the contents of a directory\"\"\"\n directories = os.listdir(root_dir)\n return directories"
},
{
"identifier": "Conversation",
"path": "src/conversation.py",
"snippet": "class Conversation:\n messages: list[dict] = dataclasses.field(default_factory=list)\n query: str = ''\n reply: str = ''\n response: dict = dataclasses.field(default_factory=dict)\n tokens: int = 0\n\n def start(self, system_prompt: str):\n self.messages = [{\"role\": \"system\", \"content\": system_prompt}]\n print()\n return Conversation(messages=self.messages)\n\n def speak(self, content: str):\n self.messages.append({\"role\": \"user\", \"content\": content})\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def think(self, thought):\n if self.query == '':\n self.query = thought\n else:\n self.query = f'{self.query}\\n{thought}'\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def listen(self, gpt: GPT):\n \"\"\"Function to perform GPT chat completions via the API\"\"\"\n self.response = gpt.client.chat.completions.create(\n model=gpt.model,\n messages=self.messages,\n temperature=gpt.temperature,\n top_p=gpt.top_p,\n n=gpt.n,\n max_tokens=gpt.max_tokens,\n frequency_penalty=gpt.frequency_penalty,\n presence_penalty=gpt.presence_penalty,\n )\n self.reply = self.response.choices[0].message.content\n self.tokens = self.response.usage.total_tokens\n print(f\"\\n{self.reply}\\n\")\n self.messages.append({\"role\": \"assistant\", \"content\": self.reply})\n\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def breath(self):\n return Conversation(messages=self.messages, query='', reply=self.reply, response=self.response)\n\n @staticmethod\n def greet():\n return Conversation(messages=[], query='', reply='', response=None)"
},
{
"identifier": "Action",
"path": "src/action.py",
"snippet": "class Action:\n \"\"\"Action dataclass returned by the input parser after parsing new input. \\\n Gets passed to the dispatcher who turns Action into function.\"\"\"\n command: str = ''\n arguments: list[str] = dataclasses.field(default_factory=list)\n raw_input: str = ''"
},
{
"identifier": "InputParser",
"path": "src/inputparser.py",
"snippet": "class InputParser:\n @staticmethod\n def parse(user_input):\n \"\"\"parses user input and passes an Action to the Dispatcher\"\"\"\n if user_input.startswith('>'):\n if ' ' in user_input:\n user_input = user_input.split(' ')\n command = user_input.pop(0).replace('>', '')\n arguments = user_input[:]\n return Action(command=command, arguments=arguments)\n else:\n command = user_input.replace('>', '')\n for _command in COMMANDS:\n if command == _command:\n return Action(command=command)\n return Action(command='error')\n else:\n action = Action(command='chat', raw_input=user_input)\n return action"
},
{
"identifier": "Dispatcher",
"path": "src/dispatcher.py",
"snippet": "class Dispatcher:\n \"\"\"Dispatches functions and manages conversation state.\"\"\"\n def __init__(self):\n self.thinking: bool = False\n\n def dispatch(self, action: Action):\n \"\"\"Turns an Action into a function\"\"\"\n if action.command == 'stop':\n self.thinking = True # >stop\n return self.silence\n elif action.command == 'start':\n self.thinking = False # >start\n return self.silence\n elif self.thinking and action.command == 'chat':\n return self.think\n elif action.command == 'chat':\n return self.speak\n elif action.command == 'exec':\n return self.execute\n elif action.command == 'insert':\n return self.insert\n elif action.command == 'show':\n return self.show\n elif action.command == 'flush':\n return self.flush\n elif action.command == 'save':\n return self.save\n elif action.command == 'set':\n return self.set\n elif action.command == 'reset':\n return self.reset\n elif action.command == 'help':\n return self.help\n elif action.command == 'exit':\n return self.goodbye\n else:\n return self.silence\n\n @staticmethod\n def silence(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Whereof one cannot speak, thereof one must be silent\"\"\"\n return gpt, conversation, logger\n\n @staticmethod\n def think(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\">stop While thinking: Append user input to conversation.query.\"\"\"\n conversation = conversation.think(action.raw_input)\n return gpt, conversation, logger\n\n @staticmethod\n def speak(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Send query to GPT API and receive response\"\"\"\n try:\n if action.raw_input == '':\n conversation = conversation.speak(content=conversation.query)\n elif action.raw_input != '' and conversation.query == '':\n conversation = conversation.speak(content=action.raw_input)\n elif action.raw_input != '' and conversation.query != '':\n conversation = conversation.speak(content=f'{conversation.query}\\n{action.raw_input}')\n conversation = conversation.listen(gpt=gpt)\n logger = logger.log(conversation)\n except openai.BadRequestError as e:\n print(f'[*] ')\n except openai.APIError as e:\n # Handle API error here, e.g. 
retry or log\n print(f\"[*] OpenAI API returned an API Error: {e}\")\n except openai.RateLimitError as e:\n # Handle rate limit error (we recommend using exponential backoff)\n print(f\"[*] OpenAI API request exceeded rate limit: {e}\")\n return gpt, conversation.breath(), logger\n\n # >exec\n def execute(self, gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Execute a system-wide command from within the program.\n Author's intended use case is directory traversal\"\"\"\n try:\n if action.arguments[0] == 'cd': # hack to allow the user to change directories\n if action.arguments[1] == 'home':\n os.chdir(logger.paths.cwd)\n else:\n os.chdir(action.arguments[1])\n print(f'[*] cwd ~ {os.getcwd()}')\n elif action.arguments[0] == 'cat':\n print(self._fetch_contents(action.arguments[1]), '\\n')\n else:\n output = subprocess.check_output(action.arguments[:], shell=True, text=True,\n stderr=subprocess.STDOUT, timeout=3)\n print(output)\n except subprocess.CalledProcessError as e:\n print(f'[*] subprocess error: {e}')\n except OSError as e:\n print(f'[*] os error: {e}')\n return gpt, conversation, logger\n\n # >insert\n def insert(self, gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"While thinking: Appends the contents of a file to the query with the >insert command\"\"\"\n insert_me = self._fetch_contents(action.arguments[0])\n conversation.query += f'{conversation.query}\\n{insert_me}'\n return gpt, conversation, logger\n\n # >flush\n @staticmethod\n def flush(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"While thinking: Resets the conversation.query to ''\"\"\"\n conversation = conversation.breath()\n return gpt, conversation, logger\n\n # >save\n @staticmethod\n def save(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Extract and save code, the reply, or the response object to an absolute, relative, or generic path\"\"\"\n try:\n logger.save(arguments=action.arguments, conversation=conversation)\n except FileNotFoundError:\n print(f'[*] error saving data')\n return gpt, conversation, logger\n\n # >set\n @staticmethod\n def set(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Allows the user to change the values for the keys of instantiated objects\"\"\"\n try:\n if action.arguments[0] == 'level' or action.arguments[0] == 'format':\n setattr(logger, action.arguments[0], ast.literal_eval(action.arguments[1]))\n print(f'[*] gpt ~ {action.arguments[0]}: {action.arguments[1]}')\n elif action.arguments[0] in ['model', 'temperature', 'top_p', 'n', 'frequency_penalty',\n 'presence_penalty', 'max_tokens']:\n setattr(gpt, action.arguments[0], ast.literal_eval(action.arguments[1]))\n print(f'[*] gpt ~ {action.arguments[0]}: {action.arguments[1]}')\n elif action.arguments[0] == 'gpt':\n if action.arguments[1] == 'client':\n print('[*] use `>reset client` to change API key')\n else:\n setattr(gpt, action.arguments[1], ast.literal_eval(action.arguments[2]))\n print(f'[*] {action.arguments[0]} ~ {action.arguments[1]}: {action.arguments[2]}')\n elif action.arguments[0] == 'logger' and ('format' == action.arguments[1] == 'level'):\n setattr(logger, action.arguments[1], ast.literal_eval(action.arguments[2]))\n print(f'[*] {action.arguments[0]} ~ {action.arguments[1]}: {action.arguments[2]}')\n else:\n print('[*] invalid entry')\n except AttributeError:\n print('[*] attribute error')\n except ValueError:\n print('[*] value error')\n except TypeError:\n 
print('[*] type error')\n return gpt, conversation, logger\n\n # >show\n @staticmethod\n def show(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Display values contained by objects: gpt, conversation, action, and logger\"\"\"\n try:\n if len(action.arguments) == 0:\n print(f'[*] query:\\n{conversation.query}\\n')\n elif action.arguments[0] == 'query':\n print(f'[*] query:\\n{conversation.query}\\n')\n elif action.arguments[0] == 'gpt' and len(action.arguments) == 2:\n print(f'[*] gpt ~ {action.arguments[1]}: {getattr(gpt, action.arguments[1])}\\n')\n elif action.arguments[0] == 'conversation':\n print(f\"[*] conversation ~ {action.arguments[1]}: {getattr(conversation, action.arguments[1])}\\n\")\n elif action.arguments[0] == 'logger':\n print(f'[*]logger ~ {action.arguments[1]}: {getattr(logger, action.arguments[1])}')\n elif action.arguments[0] == 'all':\n objects = [gpt, logger]\n for instance in objects:\n print(f'\\n[*] {type(instance).__name__}')\n for key, value in instance.__dict__.items():\n if key == 'client' or key == 'paths':\n pass\n else:\n print(f\"[-] {key.lstrip('_')}: {value}\")\n elif action.arguments[0] == 'gpt':\n print(\"\\n[*] GPT:\")\n for key, value in gpt.__dict__.items():\n if key == 'client':\n pass\n else:\n print(f\"[-] {key.lstrip('_')}: {value}\")\n elif [action.arguments[0] in k for k, v in gpt.__dict__.items()]:\n print(f\"[*] {action.arguments[0]}: {getattr(gpt, action.arguments[0])}\")\n except AttributeError:\n print('[*] invalid entry')\n return gpt, conversation, logger\n\n # >reset\n @staticmethod\n def reset(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Reset the AI assistant, or start a new log entry\"\"\"\n if len(action.arguments) == 0 or ('chat' == action.arguments[0] == 'conversation'):\n print('[*] resetting AI')\n logger.new_log()\n prompt = SystemPrompt(prompts_dir=logger.paths.prompts)\n conversation = Conversation().start(prompt.content)\n elif action.arguments[0] == 'log':\n logger.new_log()\n return gpt, conversation, logger\n\n # >help\n @staticmethod\n def help(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"Prints the help string for the context management commands\"\"\"\n print(f'[*] hkrsAI\\ncommand: >help\\n{HELP_STRING}')\n return gpt, conversation, logger\n\n # >exit\n @staticmethod\n def goodbye(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):\n \"\"\"This is the end\"\"\"\n logger.log(conversation)\n print('\\n[*] exiting\\n')\n sys.exit()\n\n @staticmethod\n def _fetch_contents(file_path):\n \"\"\"Return the contents of a file as a string variable\"\"\"\n try:\n with open(file_path, 'r') as f:\n return f.read()\n except FileNotFoundError:\n pass"
},
{
"identifier": "Logger",
"path": "src/logger.py",
"snippet": "class Logger:\n def __init__(self, paths: PathFinder, log_level: int, log_format: str):\n \"\"\"Logs conversations and saves data at the user's request\"\"\"\n self.level: int = log_level\n self.format: str = log_format\n self.paths: Paths = paths\n self.number: int = 0\n self.file: str = ''\n self.savefile: str = ''\n self.save_number: int = 0\n self.new_log()\n\n @property\n def level(self):\n return self._level\n\n @level.setter\n def level(self, new_value: int):\n if 1 != new_value != 2:\n raise TypeError\n else:\n self._level = new_value\n\n @property\n def format(self):\n return self._format\n\n @format.setter\n def format(self, new_value: str):\n if new_value == 'txt' or new_value == 'json':\n self._format = new_value\n else:\n self._format = new_value\n\n def new_log(self):\n self.number = self._next_number()\n self.file = self._new_file()\n \n def _next_number(self):\n \"\"\"Fetch the next log number from config.json and updates it\"\"\"\n config_data = self._load(self.paths.config)\n self.number = log_num = config_data['log_number']\n config_data['log_number'] = self.number + 1\n self._dump(config_data, self.paths.config)\n return self.number\n \n def _new_file(self):\n \"\"\"Generates a new logfile relative the current log number\"\"\"\n while True: # to prevent inadvertently overwriting logs if the value is changed in config.json\n self.file = f'{self.paths.logs}/log{self.number}.{self.format}'\n try:\n with open(self.file, 'x'):\n print(f'[*] logfile generated ~ {self.file}')\n return self.file\n except FileExistsError:\n self.number += 1\n\n def log(self, conversation: Conversation):\n \"\"\"Logs the response or messages as a JSON or TXT file relative to args\"\"\"\n if self.level == 1 and self.format != 'txt':\n print('[*] level 1 only supports .txt output')\n self.format = 'txt'\n if self.level == 1:\n self._dump(str(conversation.response), self.file)\n return self\n elif self.level == 2 and self.format == 'json':\n self._dump(conversation.messages, self.file)\n return self\n elif self.level == 2 and self.format == 'txt':\n with open(self.file, 'w') as f:\n for i in range(len(conversation.messages)):\n f.write(f\"{conversation.messages[i]['role']}:--------------\\n\\n\" \\\n f\"{conversation.messages[i]['content']}\\n\\n\")\n return self\n\n # >save\n def save(self, arguments, conversation):\n \"\"\"Saves information at the user's request\"\"\"\n if len(arguments) == 0:\n self._update_savefile()\n self._save_text(self.savefile, conversation.reply)\n print(f'[*] saving reply to ~ {self.savefile}')\n return\n if len(arguments) != 2:\n self._update_savefile()\n else:\n self.savefile = arguments[1]\n if arguments[0] == 'code':\n p = re.compile(r\"```((.|\\n)*)```\")\n match = re.search(p, conversation.reply)\n if match:\n self._save_text(self.savefile, match.group())\n print(f'[*] saving code to ~ {self.savefile}')\n else:\n print('[*] error: regex failed.\\n[*] ensure that GPT presents code in blocks ```code```')\n if arguments[0] == 'reply':\n self._save_text(self.savefile, conversation.reply)\n print(f'[*] saving reply to ~ {self.savefile}')\n elif arguments[0] == 'response':\n self._save_text(self.savefile, str(conversation.response))\n print(f'[*] saving response to ~ {self.savefile}')\n\n def _update_savefile(self):\n self.savefile = f'{self.paths.logs}/log{self.number}-{self.save_number}.pktai'\n self.save_number += 1\n\n @staticmethod\n def _save_text(filename, _text):\n \"\"\"Simple funtion to save text to a file\"\"\"\n with open(filename, 'w') as f:\n 
f.write(_text)\n\n @staticmethod\n def _load(json_file):\n \"\"\"Loads JSON object from a file\"\"\"\n with open(json_file, 'r') as f:\n data = json.load(f)\n return data\n\n @staticmethod\n def _dump(json_dict, json_file):\n \"\"\"Dumps a JSON object to a file\"\"\"\n with open(json_file, 'w') as f:\n json.dump(json_dict, f, indent=6)"
}
] | import sys
import os
import readline
from src.args import fetch_args
from src.pathfinder import PathFinder
from src.client import Client
from src.gpt import GPT
from src.systemprompt import SystemPrompt
from src.conversation import Conversation
from src.action import Action
from src.inputparser import InputParser
from src.dispatcher import Dispatcher
from src.logger import Logger | 7,717 |
HKRSAI = """
1 0 1
1 1 0
0 0 0
hkrsAI.v2
"""
def main():
print(HKRSAI)
args = fetch_args() # command-line arguments
paths = PathFinder(cwd=os.path.dirname(os.path.abspath(__file__)))
parser = InputParser() # Class to parse user input and return Actions.
    dispatcher = Dispatcher() # Manages conversation state and turns Actions into functions.
logger = Logger(paths=paths, log_level=args.log_level, log_format=args.log_format)
|
HKRSAI = """
1 0 1
1 1 0
0 0 0
hkrsAI.v2
"""
def main():
print(HKRSAI)
args = fetch_args() # command-line arguments
paths = PathFinder(cwd=os.path.dirname(os.path.abspath(__file__)))
parser = InputParser() # Class to parse user input and return Actions.
    dispatcher = Dispatcher() # Manages conversation state and turns Actions into functions.
logger = Logger(paths=paths, log_level=args.log_level, log_format=args.log_format)
| client = Client(config=paths.config) # OpenAI API client management object | 2 | 2023-12-22 07:04:47+00:00 | 12k |
hughxiouge/CompoundE3D | run.py | [
{
"identifier": "KGEModel",
"path": "model.py",
"snippet": "class KGEModel(nn.Module):\n def __init__(self, model_name, nentity, nrelation, hidden_dim, gamma, evaluator,\n double_entity_embedding=False, \n double_relation_embedding=False, triple_relation_embedding=False, quad_relation_embedding=False):\n super(KGEModel, self).__init__()\n self.model_name = model_name\n self.nentity = nentity\n self.nrelation = nrelation\n self.hidden_dim = hidden_dim\n self.epsilon = 2.0\n\n self.gamma = nn.Parameter(\n torch.Tensor([gamma]),\n requires_grad=False\n )\n\n self.embedding_range = nn.Parameter(\n torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]),\n requires_grad=False\n )\n\n self.entity_dim = hidden_dim*2 if double_entity_embedding else hidden_dim\n\n if double_relation_embedding:\n self.relation_dim = hidden_dim*2\n elif triple_relation_embedding:\n self.relation_dim = hidden_dim*3\n elif quad_relation_embedding:\n self.relation_dim = hidden_dim*4\n else:\n self.relation_dim = hidden_dim\n\n self.entity_embedding = nn.Parameter(torch.zeros(nentity, self.entity_dim))\n nn.init.uniform_(\n tensor=self.entity_embedding,\n a=-self.embedding_range.item(),\n b=self.embedding_range.item()\n )\n\n self.relation_embedding = nn.Parameter(torch.zeros(nrelation, self.relation_dim))\n nn.init.uniform_(\n tensor=self.relation_embedding,\n a=-self.embedding_range.item(),\n b=self.embedding_range.item()\n )\n\n #Do not forget to modify this line when you add a new model in the \"forward\" function\n if model_name not in ['TransE', 'DistMult', 'ComplEx', 'RotatE', 'PairRE', 'RotatEv2', 'CompoundE', 'CompoundE3D_Complete_Mix_T_H']:\n raise ValueError('model %s not supported' % model_name)\n\n if model_name == 'RotatE' and (not double_entity_embedding or double_relation_embedding):\n raise ValueError('RotatE should use --double_entity_embedding')\n\n if model_name == 'ComplEx' and (not double_entity_embedding or not double_relation_embedding):\n raise ValueError('ComplEx should use --double_entity_embedding and --double_relation_embedding')\n\n if model_name == 'PairRE' and (not double_relation_embedding):\n raise ValueError('PairRE should use --double_relation_embedding')\n\n if (model_name == 'CompoundE' or model_name == 'CompoundE3D_Complete_Mix_T_H') and (not triple_relation_embedding):\n raise ValueError('CompoundE should use --triple_relation_embedding')\n\n self.evaluator = evaluator\n\n def forward(self, sample, mode='single'):\n '''\n Forward function that calculate the score of a batch of triples.\n In the 'single' mode, sample is a batch of triple.\n In the 'head-batch' or 'tail-batch' mode, sample consists two part.\n The first part is usually the positive sample.\n And the second part is the entities in the negative samples.\n Because negative samples and positive samples usually share two elements\n in their triple ((head, relation) or (relation, tail)).\n '''\n\n if mode == 'single':\n batch_size, negative_sample_size = sample.size(0), 1\n\n head = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=sample[:,0]\n ).unsqueeze(1)\n\n relation = torch.index_select(\n self.relation_embedding,\n dim=0,\n index=sample[:,1]\n ).unsqueeze(1)\n\n tail = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=sample[:,2]\n ).unsqueeze(1)\n\n elif mode == 'head-batch':\n tail_part, head_part = sample\n batch_size, negative_sample_size = head_part.size(0), head_part.size(1)\n\n head = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=head_part.view(-1)\n ).view(batch_size, negative_sample_size, -1)\n\n relation 
= torch.index_select(\n self.relation_embedding,\n dim=0,\n index=tail_part[:, 1]\n ).unsqueeze(1)\n\n tail = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=tail_part[:, 2]\n ).unsqueeze(1)\n\n elif mode == 'tail-batch':\n head_part, tail_part = sample\n batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)\n\n head = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=head_part[:, 0]\n ).unsqueeze(1)\n\n relation = torch.index_select(\n self.relation_embedding,\n dim=0,\n index=head_part[:, 1]\n ).unsqueeze(1)\n\n tail = torch.index_select(\n self.entity_embedding,\n dim=0,\n index=tail_part.view(-1)\n ).view(batch_size, negative_sample_size, -1)\n\n else:\n raise ValueError('mode %s not supported' % mode)\n\n model_func = {\n 'TransE': self.TransE,\n 'DistMult': self.DistMult,\n 'ComplEx': self.ComplEx,\n 'RotatE': self.RotatE,\n 'PairRE': self.PairRE,\n 'RotatEv2': self.RotatEv2,\n 'CompoundE': self.CompoundE,\n 'CompoundE3D_Complete_Mix_T_H': self.CompoundE3D_Complete_Mix_T_H\n }\n\n if self.model_name in model_func:\n score = model_func[self.model_name](head, relation, tail, mode)\n else:\n raise ValueError('model %s not supported' % self.model_name)\n\n return score\n\n def TransE(self, head, relation, tail, mode):\n if mode == 'head-batch':\n score = head + (relation - tail)\n else:\n score = (head + relation) - tail\n\n score = self.gamma.item() - torch.norm(score, p=1, dim=2)\n return score\n\n def DistMult(self, head, relation, tail, mode):\n if mode == 'head-batch':\n score = head * (relation * tail)\n else:\n score = (head * relation) * tail\n\n score = score.sum(dim = 2)\n return score\n\n def ComplEx(self, head, relation, tail, mode):\n re_head, im_head = torch.chunk(head, 2, dim=2)\n re_relation, im_relation = torch.chunk(relation, 2, dim=2)\n re_tail, im_tail = torch.chunk(tail, 2, dim=2)\n\n if mode == 'head-batch':\n re_score = re_relation * re_tail + im_relation * im_tail\n im_score = re_relation * im_tail - im_relation * re_tail\n score = re_head * re_score + im_head * im_score\n else:\n re_score = re_head * re_relation - im_head * im_relation\n im_score = re_head * im_relation + im_head * re_relation\n score = re_score * re_tail + im_score * im_tail\n\n score = score.sum(dim = 2)\n return score\n\n def RotatE(self, head, relation, tail, mode):\n pi = 3.14159265358979323846\n\n re_head, im_head = torch.chunk(head, 2, dim=2)\n re_tail, im_tail = torch.chunk(tail, 2, dim=2)\n\n #Make phases of relations uniformly distributed in [-pi, pi]\n\n phase_relation = relation/(self.embedding_range.item()/pi)\n\n re_relation = torch.cos(phase_relation)\n im_relation = torch.sin(phase_relation)\n\n if mode == 'head-batch':\n re_score = re_relation * re_tail + im_relation * im_tail\n im_score = re_relation * im_tail - im_relation * re_tail\n re_score = re_score - re_head\n im_score = im_score - im_head\n else:\n re_score = re_head * re_relation - im_head * im_relation\n im_score = re_head * im_relation + im_head * re_relation\n re_score = re_score - re_tail\n im_score = im_score - im_tail\n\n score = torch.stack([re_score, im_score], dim = 0)\n score = score.norm(dim = 0)\n\n score = self.gamma.item() - score.sum(dim = 2)\n return score\n\n def RotatEv2(self, head, relation, tail, mode, r_norm=None):\n pi = 3.14159265358979323846\n\n re_head, im_head = torch.chunk(head, 2, dim=2)\n re_tail, im_tail = torch.chunk(tail, 2, dim=2)\n\n #Make phases of relations uniformly distributed in [-pi, pi]\n phase_relation = 
relation/(self.embedding_range.item()/pi)\n\n re_relation = torch.cos(phase_relation)\n im_relation = torch.sin(phase_relation)\n\n re_relation_head, re_relation_tail = torch.chunk(re_relation, 2, dim=2)\n im_relation_head, im_relation_tail = torch.chunk(im_relation, 2, dim=2)\n\n re_score_head = re_head * re_relation_head - im_head * im_relation_head\n im_score_head = re_head * im_relation_head + im_head * re_relation_head\n\n re_score_tail = re_tail * re_relation_tail - im_tail * im_relation_tail\n im_score_tail = re_tail * im_relation_tail + im_tail * re_relation_tail\n\n re_score = re_score_head - re_score_tail\n im_score = im_score_head - im_score_tail\n\n score = torch.stack([re_score, im_score], dim = 0)\n score = score.norm(dim = 0)\n\n score = self.gamma.item() - score.sum(dim = 2)\n return score\n\n def PairRE(self, head, relation, tail, mode):\n re_head, re_tail = torch.chunk(relation, 2, dim=2)\n\n head = F.normalize(head, 2, -1)\n tail = F.normalize(tail, 2, -1)\n\n score = head * re_head - tail * re_tail\n score = self.gamma.item() - torch.norm(score, p=1, dim=2)\n return score\n\n def CompoundE(self, head, relation, tail, mode):\n tail_scale, tail_translate, theta = torch.chunk(relation, 3, dim=2)\n theta, _ = torch.chunk(theta, 2, dim=2)\n\n head = F.normalize(head, 2, -1)\n tail = F.normalize(tail, 2, -1)\n \n pi = 3.14159265358979323846\n\n theta = theta/(self.embedding_range.item()/pi)\n\n re_rotation = torch.cos(theta)\n im_rotation = torch.sin(theta)\n\n re_rotation = re_rotation.unsqueeze(-1)\n im_rotation = im_rotation.unsqueeze(-1)\n\n tail = tail.view((tail.shape[0], tail.shape[1], -1, 2))\n\n tail_r = torch.cat((re_rotation * tail[:, :, :, 0:1], im_rotation * tail[:, :, :, 0:1]), dim=-1)\n tail_r += torch.cat((-im_rotation * tail[:, :, :, 1:], re_rotation * tail[:, :, :, 1:]), dim=-1)\n\n tail_r = tail_r.view((tail_r.shape[0], tail_r.shape[1], -1))\n\n tail_r += tail_translate\n tail_r *= tail_scale\n\n score = head - tail_r\n score = self.gamma.item() - torch.norm(score, p=1, dim=2)\n return score\n\n def CompoundE3D_Complete_Mix_T_H(self, head, relation, tail, mode):\n head_translate, shear_tail_a, shear_tail_b = torch.chunk(relation, 3, dim=2)\n\n shear_tail_a = shear_tail_a.unsqueeze(-1)\n shear_tail_b = shear_tail_b.unsqueeze(-1)\n\n sh_y_x, sh_z_x, sh_x_y = torch.chunk(shear_tail_a, 3, dim=2)\n sh_z_y, sh_x_z, sh_y_z = torch.chunk(shear_tail_b, 3, dim=2)\n\n tail = tail.view((tail.shape[0], tail.shape[1], -1, 3))\n\n tail_r = torch.cat((tail[:, :, :, 0:1], sh_x_y * tail[:, :, :, 0:1], sh_x_z * tail[:, :, :, 0:1]), dim=-1)\n tail_r += torch.cat((sh_y_x * tail[:, :, :, 1:2], tail[:, :, :, 1:2], sh_y_z * tail[:, :, :, 1:2]), dim=-1)\n tail_r += torch.cat((sh_z_x * tail[:, :, :, 2:], sh_z_y * tail[:, :, :, 2:], tail[:, :, :, 2:]), dim=-1)\n\n tail_r = tail_r.view((tail_r.shape[0], tail_r.shape[1], -1))\n\n score = head + head_translate - tail_r\n score = self.gamma.item() - torch.norm(score, p=1, dim=2)\n return score\n \n @staticmethod\n def train_step(model, optimizer, train_iterator, args):\n '''\n A single train step. 
Apply back-propation and return the loss\n '''\n\n model.train()\n optimizer.zero_grad()\n positive_sample, negative_sample, subsampling_weight, mode = next(train_iterator)\n\n if args.cuda:\n positive_sample = positive_sample.cuda()\n negative_sample = negative_sample.cuda()\n subsampling_weight = subsampling_weight.cuda()\n\n negative_score = model((positive_sample, negative_sample), mode=mode)\n if args.negative_adversarial_sampling:\n #In self-adversarial sampling, we do not apply back-propagation on the sampling weight\n negative_score = (F.softmax(negative_score * args.adversarial_temperature, dim = 1).detach()\n * F.logsigmoid(-negative_score)).sum(dim = 1)\n else:\n negative_score = F.logsigmoid(-negative_score).mean(dim = 1)\n\n positive_score = model(positive_sample)\n positive_score = F.logsigmoid(positive_score).squeeze(dim = 1)\n\n if args.uni_weight:\n positive_sample_loss = - positive_score.mean()\n negative_sample_loss = - negative_score.mean()\n else:\n positive_sample_loss = - (subsampling_weight * positive_score).sum()/subsampling_weight.sum()\n negative_sample_loss = - (subsampling_weight * negative_score).sum()/subsampling_weight.sum()\n\n loss = (positive_sample_loss + negative_sample_loss)/2\n\n if args.regularization != 0.0:\n #Use L3 regularization for ComplEx and DistMult\n regularization = args.regularization * (\n model.entity_embedding.norm(p = 3)**3 +\n model.relation_embedding.norm(p = 3).norm(p = 3)**3\n )\n loss = loss + regularization\n regularization_log = {'regularization': regularization.item()}\n else:\n regularization_log = {}\n\n loss.backward()\n\n optimizer.step()\n\n log = {\n **regularization_log,\n 'positive_sample_loss': positive_sample_loss.item(),\n 'negative_sample_loss': negative_sample_loss.item(),\n 'loss': loss.item()\n }\n\n return log\n\n @staticmethod\n def test_step(model, test_triples, args, random_sampling=False):\n '''\n Evaluate the model on test or valid datasets\n '''\n\n model.eval()\n\n #Prepare dataloader for evaluation\n test_dataloader_head = DataLoader(\n TestDataset(\n test_triples,\n args,\n 'head-batch',\n random_sampling\n ),\n batch_size=args.test_batch_size,\n num_workers=max(1, args.cpu_num//2),\n collate_fn=TestDataset.collate_fn\n )\n\n test_dataloader_tail = DataLoader(\n TestDataset(\n test_triples,\n args,\n 'tail-batch',\n random_sampling\n ),\n batch_size=args.test_batch_size,\n num_workers=max(1, args.cpu_num//2),\n collate_fn=TestDataset.collate_fn\n )\n\n test_dataset_list = [test_dataloader_head, test_dataloader_tail]\n\n test_logs = defaultdict(list)\n\n step = 0\n total_steps = sum([len(dataset) for dataset in test_dataset_list])\n\n with torch.no_grad():\n t1 = datetime.datetime.now().microsecond\n t3 = time.mktime(datetime.datetime.now().timetuple())\n for test_dataset in test_dataset_list:\n for positive_sample, negative_sample, mode in test_dataset:\n if args.cuda:\n positive_sample = positive_sample.cuda()\n negative_sample = negative_sample.cuda()\n\n batch_size = positive_sample.size(0)\n score = model((positive_sample, negative_sample), mode)\n\n batch_results = model.evaluator.eval({'y_pred_pos': score[:, 0],\n 'y_pred_neg': score[:, 1:]})\n for metric in batch_results:\n test_logs[metric].append(batch_results[metric])\n\n if step % args.test_log_steps == 0:\n logging.info('Evaluating the model... 
(%d/%d)' % (step, total_steps))\n\n step += 1\n\n t2 = datetime.datetime.now().microsecond\n t4 = time.mktime(datetime.datetime.now().timetuple())\n strTime = 'funtion time use:%dms' % ((t4 - t3) * 1000 + (t2 - t1) / 1000)\n print (strTime)\n\n metrics = {}\n for metric in test_logs:\n metrics[metric] = torch.cat(test_logs[metric]).mean().item()\n\n return metrics"
},
{
"identifier": "TrainDataset",
"path": "dataloader.py",
"snippet": "class TrainDataset(Dataset):\n def __init__(self, triples, nentity, nrelation, negative_sample_size, mode, count, true_head, true_tail):\n self.len = len(triples['head'])\n self.triples = triples\n self.nentity = nentity\n self.nrelation = nrelation\n self.negative_sample_size = negative_sample_size\n self.mode = mode\n self.count = count\n self.true_head = true_head\n self.true_tail = true_tail\n \n def __len__(self):\n return self.len\n \n def __getitem__(self, idx):\n head, relation, tail = self.triples['head'][idx], self.triples['relation'][idx], self.triples['tail'][idx]\n positive_sample = [head, relation, tail]\n\n subsampling_weight = self.count[(head, relation)] + self.count[(tail, -relation-1)]\n subsampling_weight = torch.sqrt(1 / torch.Tensor([subsampling_weight]))\n \n negative_sample = torch.randint(0, self.nentity, (self.negative_sample_size,))\n positive_sample = torch.LongTensor(positive_sample)\n \n return positive_sample, negative_sample, subsampling_weight, self.mode\n \n @staticmethod\n def collate_fn(data):\n positive_sample = torch.stack([_[0] for _ in data], dim=0)\n negative_sample = torch.stack([_[1] for _ in data], dim=0)\n subsample_weight = torch.cat([_[2] for _ in data], dim=0)\n mode = data[0][3]\n return positive_sample, negative_sample, subsample_weight, mode"
},
{
"identifier": "BidirectionalOneShotIterator",
"path": "dataloader.py",
"snippet": "class BidirectionalOneShotIterator(object):\n def __init__(self, dataloader_head, dataloader_tail):\n self.iterator_head = self.one_shot_iterator(dataloader_head)\n self.iterator_tail = self.one_shot_iterator(dataloader_tail)\n self.step = 0\n \n def __next__(self):\n self.step += 1\n if self.step % 2 == 0:\n data = next(self.iterator_head)\n else:\n data = next(self.iterator_tail)\n return data\n \n @staticmethod\n def one_shot_iterator(dataloader):\n '''\n Transform a PyTorch Dataloader into python iterator\n '''\n while True:\n for data in dataloader:\n yield data"
}
] | import argparse
import json
import logging
import os
import random
import numpy as np
import torch
import time
import os.path as osp
from torch.utils.data import DataLoader
from model import KGEModel
from dataloader import TrainDataset
from dataloader import BidirectionalOneShotIterator
from ogb.linkproppred import LinkPropPredDataset, Evaluator
from collections import defaultdict
from tqdm import tqdm
from tensorboardX import SummaryWriter | 7,747 |
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):
        raise ValueError('one of train/val/test mode must be chosen.')
if args.init_checkpoint:
override_config(args)
args.save_path = 'log/%s/%s/%s-%s/%s'%(args.dataset, args.model, args.hidden_dim, args.gamma, time.time()) if args.save_path == None else args.save_path
writer = SummaryWriter(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
dataset = LinkPropPredDataset(name = args.dataset)
split_dict = dataset.get_edge_split()
nentity = dataset.graph['num_nodes']
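    # relation ids are zero-based, so the relation count is the maximum id + 1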
nrelation = int(max(dataset.graph['edge_reltype'])[0])+1
evaluator = Evaluator(name = args.dataset)
args.nentity = nentity
args.nrelation = nrelation
logging.info('Model: %s' % args.model)
logging.info('Dataset: %s' % args.dataset)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
train_triples = split_dict['train']
logging.info('#train: %d' % len(train_triples['head']))
valid_triples = split_dict['valid']
logging.info('#valid: %d' % len(valid_triples['head']))
test_triples = split_dict['test']
logging.info('#test: %d' % len(test_triples['head']))
logging.info('relation type %s' % args.relation_type)
print('relation type %s' % args.relation_type)
test_set_file = ''
if args.relation_type == '1-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-1.pt'
elif args.relation_type == '1-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-n.pt'
elif args.relation_type == 'n-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-1.pt'
elif args.relation_type == 'n-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-n.pt'
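    # when a relation category is selected, keep only test triples of that type (cached to a .pt file after the first pass)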
if test_set_file != '':
if osp.exists(test_set_pre_processed):
test_triples = torch.load(test_set_pre_processed, 'rb')
print("load pre processed test set")
else:
test_triples_new = {}
test_triples_chosen = []
test_triples_new['head'] = []
test_triples_new['relation'] = []
test_triples_new['tail'] = []
test_triples_new['head_neg'] = []
test_triples_new['tail_neg'] = []
f_test = open(test_set_file, "r")
for line in f_test:
h, r, t = line.strip().split('\t')
h, r, t = int(h), int(r), int(t)
test_triples_chosen.append((h, r, t))
f_test.close()
for idx in range(len(test_triples['head'])):
h, r, t = test_triples['head'][idx], test_triples['relation'][idx], test_triples['tail'][idx]
if (h, r, t) in test_triples_chosen:
test_triples_new['head'].append(h)
test_triples_new['relation'].append(r)
test_triples_new['tail'].append(t)
test_triples_new['head_neg'].append(test_triples['head_neg'][idx])
test_triples_new['tail_neg'].append(test_triples['tail_neg'][idx])
print('Saving ...')
torch.save(test_triples_new, test_set_pre_processed, pickle_protocol=4)
test_triples = test_triples_new
logging.info('#test: %d' % len(test_triples['head']))
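    # frequency counts for subsampling weights (cf. --uni_weight); counts start at 4 for smoothing, and (tail, -relation-1) keys the inverse direction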
train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)
f_train = open("train.txt", "w")
for i in tqdm(range(len(train_triples['head']))):
head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]
train_count[(head, relation)] += 1
train_count[(tail, -relation-1)] += 1
train_true_head[(relation, tail)].append(head)
train_true_tail[(head, relation)].append(tail)
f_train.write("\t".join([str(head), str(relation), str(tail)]) + '\n')
f_train.close()
kge_model = KGEModel(
model_name=args.model,
nentity=nentity,
nrelation=nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding,
triple_relation_embedding=args.triple_relation_embedding,
quad_relation_embedding=args.quad_relation_embedding,
evaluator=evaluator
)
logging.info('Model Parameter Configuration:')
for name, param in kge_model.named_parameters():
logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))
if args.cuda:
kge_model = kge_model.cuda()
if args.do_train:
# Set training dataloader iterator
train_dataloader_head = DataLoader(
| #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Training and Testing Knowledge Graph Embedding Models',
usage='train.py [<args>] [-h | --help]'
)
parser.add_argument('--cuda', action='store_true', help='use GPU')
parser.add_argument('--do_train', action='store_true')
parser.add_argument('--do_valid', action='store_true')
parser.add_argument('--do_test', action='store_true')
parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')
parser.add_argument('--dataset', type=str, default='ogbl-wikikg2', help='dataset name, default to wikikg')
parser.add_argument('--model', default='TransE', type=str)
parser.add_argument('-de', '--double_entity_embedding', action='store_true')
parser.add_argument('-dr', '--double_relation_embedding', action='store_true')
parser.add_argument('-tr', '--triple_relation_embedding', action='store_true')
parser.add_argument('-qr', '--quad_relation_embedding', action='store_true')
parser.add_argument('-n', '--negative_sample_size', default=128, type=int)
parser.add_argument('-d', '--hidden_dim', default=500, type=int)
parser.add_argument('-g', '--gamma', default=12.0, type=float)
parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
parser.add_argument('-b', '--batch_size', default=1024, type=int)
parser.add_argument('-r', '--regularization', default=0.0, type=float)
parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
parser.add_argument('--uni_weight', action='store_true',
help='Otherwise use subsampling weighting like in word2vec')
parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
parser.add_argument('-cpu', '--cpu_num', default=10, type=int)
parser.add_argument('-init', '--init_checkpoint', default=None, type=str)
parser.add_argument('-save', '--save_path', default=None, type=str)
parser.add_argument('--max_steps', default=100000, type=int)
parser.add_argument('--warm_up_steps', default=None, type=int)
parser.add_argument('--save_checkpoint_steps', default=10000, type=int)
parser.add_argument('--valid_steps', default=10000, type=int)
parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')
parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')
parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
parser.add_argument('--print_on_screen', action='store_true', help='log on screen or not')
parser.add_argument('--ntriples_eval_train', type=int, default=200000, help='number of training triples to evaluate eventually')
parser.add_argument('--neg_size_eval_train', type=int, default=500, help='number of negative samples when evaluating training triples')
parser.add_argument('--relation_type', type=str, default='all', help='1-1, 1-n, n-1, n-n')
return parser.parse_args(args)
def override_config(args):
'''
Override model and data configuration
'''
with open(os.path.join(args.init_checkpoint, 'config.json'), 'r') as fjson:
argparse_dict = json.load(fjson)
args.dataset = argparse_dict['dataset']
args.model = argparse_dict['model']
args.double_entity_embedding = argparse_dict['double_entity_embedding']
args.double_relation_embedding = argparse_dict['double_relation_embedding']
args.triple_relation_embedding = argparse_dict['triple_relation_embedding']
args.quad_relation_embedding = argparse_dict['quad_relation_embedding']
args.hidden_dim = argparse_dict['hidden_dim']
args.test_batch_size = argparse_dict['test_batch_size']
def save_model(model, optimizer, save_variable_list, args):
'''
Save the parameters of the model and the optimizer,
as well as some other variables such as step and learning_rate
'''
argparse_dict = vars(args)
with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:
json.dump(argparse_dict, fjson)
entity_embedding = model.entity_embedding.detach().cpu().numpy()
np.save(
os.path.join(args.save_path, 'entity_embedding'),
entity_embedding
)
relation_embedding = model.relation_embedding.detach().cpu().numpy()
np.save(
os.path.join(args.save_path, 'relation_embedding'),
relation_embedding
)
def set_logger(args):
'''
Write logs to checkpoint and console
'''
if args.do_train:
log_file = os.path.join(args.save_path or args.init_checkpoint, 'train.log')
else:
log_file = os.path.join(args.save_path or args.init_checkpoint, 'test.log')
print(log_file)
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='w'
)
if args.print_on_screen:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics, writer):
'''
Print the evaluation logs
'''
for metric in metrics:
logging.info('%s %s at step %d: %f' % (mode, metric, step, metrics[metric]))
writer.add_scalar("_".join([mode, metric]), metrics[metric], step)
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):
        raise ValueError('one of train/val/test mode must be chosen.')
if args.init_checkpoint:
override_config(args)
args.save_path = 'log/%s/%s/%s-%s/%s'%(args.dataset, args.model, args.hidden_dim, args.gamma, time.time()) if args.save_path == None else args.save_path
writer = SummaryWriter(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
dataset = LinkPropPredDataset(name = args.dataset)
split_dict = dataset.get_edge_split()
nentity = dataset.graph['num_nodes']
nrelation = int(max(dataset.graph['edge_reltype'])[0])+1
evaluator = Evaluator(name = args.dataset)
args.nentity = nentity
args.nrelation = nrelation
logging.info('Model: %s' % args.model)
logging.info('Dataset: %s' % args.dataset)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
train_triples = split_dict['train']
logging.info('#train: %d' % len(train_triples['head']))
valid_triples = split_dict['valid']
logging.info('#valid: %d' % len(valid_triples['head']))
test_triples = split_dict['test']
logging.info('#test: %d' % len(test_triples['head']))
logging.info('relation type %s' % args.relation_type)
print('relation type %s' % args.relation_type)
test_set_file = ''
if args.relation_type == '1-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-1.pt'
elif args.relation_type == '1-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-n.pt'
elif args.relation_type == 'n-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-1.pt'
elif args.relation_type == 'n-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-n.pt'
if test_set_file != '':
if osp.exists(test_set_pre_processed):
test_triples = torch.load(test_set_pre_processed, 'rb')
print("load pre processed test set")
else:
test_triples_new = {}
test_triples_chosen = []
test_triples_new['head'] = []
test_triples_new['relation'] = []
test_triples_new['tail'] = []
test_triples_new['head_neg'] = []
test_triples_new['tail_neg'] = []
f_test = open(test_set_file, "r")
for line in f_test:
h, r, t = line.strip().split('\t')
h, r, t = int(h), int(r), int(t)
test_triples_chosen.append((h, r, t))
f_test.close()
for idx in range(len(test_triples['head'])):
h, r, t = test_triples['head'][idx], test_triples['relation'][idx], test_triples['tail'][idx]
if (h, r, t) in test_triples_chosen:
test_triples_new['head'].append(h)
test_triples_new['relation'].append(r)
test_triples_new['tail'].append(t)
test_triples_new['head_neg'].append(test_triples['head_neg'][idx])
test_triples_new['tail_neg'].append(test_triples['tail_neg'][idx])
print('Saving ...')
torch.save(test_triples_new, test_set_pre_processed, pickle_protocol=4)
test_triples = test_triples_new
logging.info('#test: %d' % len(test_triples['head']))
train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)
f_train = open("train.txt", "w")
for i in tqdm(range(len(train_triples['head']))):
head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]
train_count[(head, relation)] += 1
train_count[(tail, -relation-1)] += 1
train_true_head[(relation, tail)].append(head)
train_true_tail[(head, relation)].append(tail)
f_train.write("\t".join([str(head), str(relation), str(tail)]) + '\n')
f_train.close()
kge_model = KGEModel(
model_name=args.model,
nentity=nentity,
nrelation=nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding,
triple_relation_embedding=args.triple_relation_embedding,
quad_relation_embedding=args.quad_relation_embedding,
evaluator=evaluator
)
logging.info('Model Parameter Configuration:')
for name, param in kge_model.named_parameters():
logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))
if args.cuda:
kge_model = kge_model.cuda()
if args.do_train:
# Set training dataloader iterator
train_dataloader_head = DataLoader( | TrainDataset(train_triples, nentity, nrelation, | 1 | 2023-12-29 22:57:53+00:00 | 12k |
daswer123/rvc-python | rvc_python/modules/vc/modules.py | [
{
"identifier": "load_audio",
"path": "rvc_python/lib/audio.py",
"snippet": "def load_audio(file, sr):\n file = (\n file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n ) # 防止小白拷路径头尾带了空格和\"和回车\n if os.path.exists(file) == False:\n raise RuntimeError(\n \"You input a wrong audio path that does not exists, please fix it!\"\n )\n try:\n with open(file, \"rb\") as f:\n with BytesIO() as out:\n audio2(f, out, \"f32le\", sr)\n return np.frombuffer(out.getvalue(), np.float32).flatten()\n\n except AttributeError:\n audio = file[1] / 32768.0\n if len(audio.shape) == 2:\n audio = np.mean(audio, -1)\n return librosa.resample(audio, orig_sr=file[0], target_sr=16000)\n\n except:\n raise RuntimeError(traceback.format_exc())"
},
{
"identifier": "wav2",
"path": "rvc_python/lib/audio.py",
"snippet": "def wav2(i, o, format):\n inp = av.open(i, \"rb\")\n if format == \"m4a\":\n format = \"mp4\"\n out = av.open(o, \"wb\", format=format)\n if format == \"ogg\":\n format = \"libvorbis\"\n if format == \"mp4\":\n format = \"aac\"\n\n ostream = out.add_stream(format)\n\n for frame in inp.decode(audio=0):\n for p in ostream.encode(frame):\n out.mux(p)\n\n for p in ostream.encode(None):\n out.mux(p)\n\n out.close()\n inp.close()"
},
{
"identifier": "SynthesizerTrnMs256NSFsid",
"path": "rvc_python/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n nsff0 = nsff0[:, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs256NSFsid_nono",
"path": "rvc_python/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs768NSFsid",
"path": "rvc_python/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n nsff0 = nsff0[:, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "SynthesizerTrnMs768NSFsid_nono",
"path": "rvc_python/lib/infer_pack/models.py",
"snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)"
},
{
"identifier": "Pipeline",
"path": "rvc_python/modules/vc/pipeline.py",
"snippet": "class Pipeline(object):\n def __init__(self, tgt_sr, config, lib_dir):\n self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (\n config.x_pad,\n config.x_query,\n config.x_center,\n config.x_max,\n config.is_half,\n )\n self.sr = 16000 # hubert输入采样率\n self.window = 160 # 每帧点数\n self.t_pad = self.sr * self.x_pad # 每条前后pad时间\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # 查询切点前后查询时间\n self.t_center = self.sr * self.x_center # 查询切点位置\n self.t_max = self.sr * self.x_max # 免查询时长阈值\n self.device = config.device\n self.lib_dir = lib_dir\n\n def get_f0(\n self,\n input_audio_path,\n x,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0=None,\n ):\n global input_audio_path2wav\n time_step = self.window / self.sr * 1000\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n if f0_method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len - len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif f0_method == \"harvest\":\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"crepe\":\n model = \"full\"\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n elif f0_method == \"rmvpe\":\n if not hasattr(self, \"model_rmvpe\"):\n from rvc_python.lib.rmvpe import RMVPE\n\n logger.info(\n \"Loading rmvpe model - base_models/rmvpe.pth\"\n )\n rmvpe_path = Path(f\"{self.lib_dir}\\\\base_model\\\\rmvpe.pt\")\n self.model_rmvpe = RMVPE(\n rmvpe_path,\n is_half=self.is_half,\n device=self.device,\n )\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n\n if \"privateuseone\" in str(self.device): # clean ortruntime memory\n del self.model_rmvpe.model\n del self.model_rmvpe\n logger.info(\"Cleaning ortruntime memory\")\n\n f0 *= pow(2, f0_up_key / 12)\n # with open(\"test.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n tf0 = self.sr // self.window # 每秒f0点数\n if inp_f0 is not None:\n delta_t = np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[\n :shape\n ]\n # with open(\"test_opt.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n f0bak = f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(np.int32)\n return 
f0_coarse, f0bak # 1-0\n\n def vc(\n self,\n model,\n net_g,\n sid,\n audio0,\n pitch,\n pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n ): # ,file_index,file_big_npy\n feats = torch.from_numpy(audio0)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)\n\n inputs = {\n \"source\": feats.to(self.device),\n \"padding_mask\": padding_mask,\n \"output_layer\": 9 if version == \"v1\" else 12,\n }\n t0 = ttime()\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n feats = model.final_proj(logits[0]) if version == \"v1\" else logits[0]\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = feats.clone()\n if (\n not isinstance(index, type(None))\n and not isinstance(big_npy, type(None))\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n # _, I = index.search(npy, 1)\n # npy = big_npy[I.squeeze()]\n\n score, ix = index.search(npy, k=8)\n weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(\n 0, 2, 1\n )\n t1 = ttime()\n p_len = audio0.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch is not None and pitchf is not None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n\n if protect < 0.5 and pitch is not None and pitchf is not None:\n pitchff = pitchf.clone()\n pitchff[pitchf > 0] = 1\n pitchff[pitchf < 1] = protect\n pitchff = pitchff.unsqueeze(-1)\n feats = feats * pitchff + feats0 * (1 - pitchff)\n feats = feats.to(feats0.dtype)\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n hasp = pitch is not None and pitchf is not None\n arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)\n audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()\n del hasp, arg\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n t2 = ttime()\n times[0] += t1 - t0\n times[2] += t2 - t1\n return audio1\n\n def pipeline(\n self,\n model,\n net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n index_rate,\n if_f0,\n filter_radius,\n tgt_sr,\n resample_sr,\n rms_mix_rate,\n version,\n protect,\n f0_file=None,\n ):\n if (\n file_index != \"\"\n # and file_big_npy != \"\"\n # and os.path.exists(file_big_npy) == True\n and os.path.exists(file_index)\n and index_rate != 0\n ):\n try:\n index = faiss.read_index(file_index)\n # big_npy = np.load(file_big_npy)\n big_npy = index.reconstruct_n(0, index.ntotal)\n except:\n traceback.print_exc()\n index = big_npy = None\n else:\n index = big_npy = None\n audio = signal.filtfilt(bh, ah, audio)\n audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in 
range(self.window):\n audio_sum += np.abs(audio_pad[i : i - self.window])\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n audio_sum[t - self.t_query : t + self.t_query]\n == audio_sum[t - self.t_query : t + self.t_query].min()\n )[0][0]\n )\n s = 0\n audio_opt = []\n t = None\n t1 = ttime()\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\"):\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n # print(sid)\n # sid = os.path.abspath(sid)\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n input_audio_path,\n audio_pad,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0,\n )\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if \"mps\" not in str(self.device) or \"xpu\" not in str(self.device):\n pitchf = pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()\n t2 = ttime()\n times[1] += t2 - t1\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n pitch[:, s // self.window : (t + self.t_pad2) // self.window],\n pitchf[:, s // self.window : (t + self.t_pad2) // self.window],\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window :] if t is not None else pitch,\n pitchf[:, t // self.window :] if t is not None else pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n if rms_mix_rate != 1:\n audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)\n if tgt_sr != resample_sr >= 16000:\n audio_opt = librosa.resample(\n audio_opt, orig_sr=tgt_sr, target_sr=resample_sr\n )\n audio_max = np.abs(audio_opt).max() / 0.99\n max_int16 = 32768\n if audio_max > 1:\n max_int16 /= audio_max\n audio_opt = (audio_opt * max_int16).astype(np.int16)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt"
}
] | import traceback
import logging
import numpy as np
import soundfile as sf
import torch
from io import BytesIO
from rvc_python.lib.audio import load_audio, wav2
from rvc_python.lib.infer_pack.models import (
SynthesizerTrnMs256NSFsid,
SynthesizerTrnMs256NSFsid_nono,
SynthesizerTrnMs768NSFsid,
SynthesizerTrnMs768NSFsid_nono,
)
from rvc_python.modules.vc.pipeline import Pipeline
from rvc_python.modules.vc.utils import * | 9,846 |
logger = logging.getLogger(__name__)
class VC:
def __init__(self, lib_dir, config):
self.lib_dir = lib_dir
self.n_spk = None
self.tgt_sr = None
self.net_g = None
self.pipeline = None
self.cpt = None
self.version = None
self.if_f0 = None
self.version = None
self.hubert_model = None
self.config = config
    def get_vc(self, sid, version="v2", *to_return_protect):
# logger.info("Get sid: " + sid)
to_return_protect0 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[0]
if self.if_f0 != 0 and to_return_protect
else 0.5,
"__type__": "update",
}
to_return_protect1 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[1]
if self.if_f0 != 0 and to_return_protect
else 0.33,
"__type__": "update",
}
if sid == "" or sid == []:
            if self.hubert_model is not None:  # with polling in mind, check whether sid switched from a loaded model to no model
logger.info("Clean model cache")
del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt
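                # drop the remaining references as well so the GPU memory can actually be freed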
self.hubert_model = (
self.net_g
) = self.n_spk = self.hubert_model = self.tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
                ### without the juggling above, the cleanup below would not be thorough
self.if_f0 = self.cpt.get("f0", 1)
self.version = self.cpt.get("version", "v1")
if self.version == "v1":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs256NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
elif self.version == "v2":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs768NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"])
del self.net_g, self.cpt
if torch.cuda.is_available():
torch.cuda.empty_cache()
return (
{"visible": False, "__type__": "update"},
{
"visible": True,
"value": to_return_protect0,
"__type__": "update",
},
{
"visible": True,
"value": to_return_protect1,
"__type__": "update",
},
"",
"",
)
person = f'{sid}'
logger.info(f"Loading: {person}")
# print(sid,person)
self.cpt = torch.load(sid, map_location="cpu")
self.tgt_sr = self.cpt["config"][-1]
self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk
self.if_f0 = self.cpt.get("f0", 1)
self.version = version
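        # pick the synthesizer variant from (version, f0 flag); fall back to the v1 f0 model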
synthesizer_class = {
("v1", 1): SynthesizerTrnMs256NSFsid,
("v1", 0): SynthesizerTrnMs256NSFsid_nono,
("v2", 1): SynthesizerTrnMs768NSFsid,
("v2", 0): SynthesizerTrnMs768NSFsid_nono,
}
self.net_g = synthesizer_class.get(
(self.version, self.if_f0), SynthesizerTrnMs256NSFsid
)(*self.cpt["config"], is_half=self.config.is_half)
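        # enc_q (the posterior encoder) is only used during training, so drop it before inference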
del self.net_g.enc_q
self.net_g.load_state_dict(self.cpt["weight"], strict=False)
self.net_g.eval().to(self.config.device)
if self.config.is_half:
self.net_g = self.net_g.half()
else:
self.net_g = self.net_g.float()
|
logger = logging.getLogger(__name__)
class VC:
def __init__(self, lib_dir, config):
self.lib_dir = lib_dir
self.n_spk = None
self.tgt_sr = None
self.net_g = None
self.pipeline = None
self.cpt = None
self.version = None
self.if_f0 = None
self.version = None
self.hubert_model = None
self.config = config
    def get_vc(self, sid, version="v2", *to_return_protect):
# logger.info("Get sid: " + sid)
to_return_protect0 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[0]
if self.if_f0 != 0 and to_return_protect
else 0.5,
"__type__": "update",
}
to_return_protect1 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[1]
if self.if_f0 != 0 and to_return_protect
else 0.33,
"__type__": "update",
}
if sid == "" or sid == []:
            if self.hubert_model is not None:  # with polling in mind, check whether sid switched from a loaded model to no model
logger.info("Clean model cache")
del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt
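                # drop the remaining references as well so the GPU memory can actually be freed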
self.hubert_model = (
self.net_g
) = self.n_spk = self.hubert_model = self.tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
                ### without the juggling above, the cleanup below would not be thorough
self.if_f0 = self.cpt.get("f0", 1)
self.version = self.cpt.get("version", "v1")
if self.version == "v1":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs256NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
elif self.version == "v2":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs768NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"])
del self.net_g, self.cpt
if torch.cuda.is_available():
torch.cuda.empty_cache()
return (
{"visible": False, "__type__": "update"},
{
"visible": True,
"value": to_return_protect0,
"__type__": "update",
},
{
"visible": True,
"value": to_return_protect1,
"__type__": "update",
},
"",
"",
)
person = f'{sid}'
logger.info(f"Loading: {person}")
# print(sid,person)
self.cpt = torch.load(sid, map_location="cpu")
self.tgt_sr = self.cpt["config"][-1]
self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk
self.if_f0 = self.cpt.get("f0", 1)
self.version = version
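        # pick the synthesizer variant from (version, f0 flag); fall back to the v1 f0 model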
synthesizer_class = {
("v1", 1): SynthesizerTrnMs256NSFsid,
("v1", 0): SynthesizerTrnMs256NSFsid_nono,
("v2", 1): SynthesizerTrnMs768NSFsid,
("v2", 0): SynthesizerTrnMs768NSFsid_nono,
}
self.net_g = synthesizer_class.get(
(self.version, self.if_f0), SynthesizerTrnMs256NSFsid
)(*self.cpt["config"], is_half=self.config.is_half)
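        # enc_q (the posterior encoder) is only used during training, so drop it before inference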
del self.net_g.enc_q
self.net_g.load_state_dict(self.cpt["weight"], strict=False)
self.net_g.eval().to(self.config.device)
if self.config.is_half:
self.net_g = self.net_g.half()
else:
self.net_g = self.net_g.float()
| self.pipeline = Pipeline(self.tgt_sr, self.config,lib_dir=self.lib_dir) | 6 | 2023-12-26 19:05:42+00:00 | 12k |
open-mmlab/Amphion | models/svc/vits/vits.py | [
{
"identifier": "f0_to_coarse",
"path": "utils/f0.py",
"snippet": "def f0_to_coarse(f0, pitch_bin, pitch_min, pitch_max):\n ## TODO: Figure out the detail of this function\n\n f0_mel_min = 1127 * np.log(1 + pitch_min / 700)\n f0_mel_max = 1127 * np.log(1 + pitch_max / 700)\n\n is_torch = isinstance(f0, torch.Tensor)\n f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (pitch_bin - 2) / (\n f0_mel_max - f0_mel_min\n ) + 1\n\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > pitch_bin - 1] = pitch_bin - 1\n f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int32)\n assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (\n f0_coarse.max(),\n f0_coarse.min(),\n )\n return f0_coarse"
},
{
"identifier": "Encoder",
"path": "modules/transformer/attentions.py",
"snippet": "class Encoder(nn.Module):\n def __init__(\n self,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size=1,\n p_dropout=0.0,\n window_size=4,\n **kwargs,\n ):\n super().__init__()\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.window_size = window_size\n\n self.drop = nn.Dropout(p_dropout)\n self.attn_layers = nn.ModuleList()\n self.norm_layers_1 = nn.ModuleList()\n self.ffn_layers = nn.ModuleList()\n self.norm_layers_2 = nn.ModuleList()\n for i in range(self.n_layers):\n self.attn_layers.append(\n MultiHeadAttention(\n hidden_channels,\n hidden_channels,\n n_heads,\n p_dropout=p_dropout,\n window_size=window_size,\n )\n )\n self.norm_layers_1.append(LayerNorm(hidden_channels))\n self.ffn_layers.append(\n FFN(\n hidden_channels,\n hidden_channels,\n filter_channels,\n kernel_size,\n p_dropout=p_dropout,\n )\n )\n self.norm_layers_2.append(LayerNorm(hidden_channels))\n\n def forward(self, x, x_mask):\n attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)\n x = x * x_mask\n for i in range(self.n_layers):\n y = self.attn_layers[i](x, x, attn_mask)\n y = self.drop(y)\n x = self.norm_layers_1[i](x + y)\n\n y = self.ffn_layers[i](x, x_mask)\n y = self.drop(y)\n x = self.norm_layers_2[i](x + y)\n x = x * x_mask\n return x"
},
{
"identifier": "ResidualCouplingBlock",
"path": "models/tts/vits/vits.py",
"snippet": "class ResidualCouplingBlock(nn.Module):\n def __init__(\n self,\n channels,\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n n_flows=4,\n gin_channels=0,\n ):\n super().__init__()\n self.channels = channels\n self.hidden_channels = hidden_channels\n self.kernel_size = kernel_size\n self.dilation_rate = dilation_rate\n self.n_layers = n_layers\n self.n_flows = n_flows\n self.gin_channels = gin_channels\n\n self.flows = nn.ModuleList()\n for i in range(n_flows):\n self.flows.append(\n ResidualCouplingLayer(\n channels,\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n gin_channels=gin_channels,\n mean_only=True,\n )\n )\n self.flows.append(Flip())\n\n def forward(self, x, x_mask, g=None, reverse=False):\n if not reverse:\n for flow in self.flows:\n x, _ = flow(x, x_mask, g=g, reverse=reverse)\n else:\n for flow in reversed(self.flows):\n x = flow(x, x_mask, g=g, reverse=reverse)\n return x"
},
{
"identifier": "PosteriorEncoder",
"path": "models/tts/vits/vits.py",
"snippet": "class PosteriorEncoder(nn.Module):\n def __init__(\n self,\n in_channels,\n out_channels,\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n gin_channels=0,\n ):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.hidden_channels = hidden_channels\n self.kernel_size = kernel_size\n self.dilation_rate = dilation_rate\n self.n_layers = n_layers\n self.gin_channels = gin_channels\n\n self.pre = nn.Conv1d(in_channels, hidden_channels, 1)\n self.enc = WN(\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n gin_channels=gin_channels,\n )\n self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)\n\n def forward(self, x, x_lengths, g=None):\n x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)\n x = self.pre(x) * x_mask\n x = self.enc(x, x_mask, g=g)\n stats = self.proj(x) * x_mask\n m, logs = torch.split(stats, self.out_channels, dim=1)\n z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask\n return z, m, logs, x_mask"
},
{
"identifier": "BigVGAN",
"path": "models/vocoders/gan/generator/bigvgan.py",
"snippet": "class BigVGAN(torch.nn.Module):\n def __init__(self, cfg):\n super(BigVGAN, self).__init__()\n self.cfg = cfg\n\n self.num_kernels = len(cfg.model.bigvgan.resblock_kernel_sizes)\n self.num_upsamples = len(cfg.model.bigvgan.upsample_rates)\n\n # Conv pre to boost channels\n self.conv_pre = weight_norm(\n Conv1d(\n cfg.preprocess.n_mel,\n cfg.model.bigvgan.upsample_initial_channel,\n 7,\n 1,\n padding=3,\n )\n )\n\n resblock = AMPBlock1 if cfg.model.bigvgan.resblock == \"1\" else AMPBlock2\n\n # Upsamplers\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(\n zip(\n cfg.model.bigvgan.upsample_rates,\n cfg.model.bigvgan.upsample_kernel_sizes,\n )\n ):\n self.ups.append(\n nn.ModuleList(\n [\n weight_norm(\n ConvTranspose1d(\n cfg.model.bigvgan.upsample_initial_channel // (2**i),\n cfg.model.bigvgan.upsample_initial_channel\n // (2 ** (i + 1)),\n k,\n u,\n padding=(k - u) // 2,\n )\n )\n ]\n )\n )\n\n # Res Blocks with AMP and Anti-aliasing\n self.resblocks = nn.ModuleList()\n for i in range(len(self.ups)):\n ch = cfg.model.bigvgan.upsample_initial_channel // (2 ** (i + 1))\n for j, (k, d) in enumerate(\n zip(\n cfg.model.bigvgan.resblock_kernel_sizes,\n cfg.model.bigvgan.resblock_dilation_sizes,\n )\n ):\n self.resblocks.append(\n resblock(cfg, ch, k, d, activation=cfg.model.bigvgan.activation)\n )\n\n # Conv post for result\n if cfg.model.bigvgan.activation == \"snake\":\n activation_post = Snake(ch, alpha_logscale=cfg.model.bigvgan.snake_logscale)\n self.activation_post = Activation1d(activation=activation_post)\n elif cfg.model.bigvgan.activation == \"snakebeta\":\n activation_post = SnakeBeta(\n ch, alpha_logscale=cfg.model.bigvgan.snake_logscale\n )\n self.activation_post = Activation1d(activation=activation_post)\n else:\n raise NotImplementedError(\n \"activation incorrectly specified. check the config file and look for 'activation'.\"\n )\n\n self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))\n\n # Weight Norm\n for i in range(len(self.ups)):\n self.ups[i].apply(init_weights)\n self.conv_post.apply(init_weights)\n\n def forward(self, x):\n x = self.conv_pre(x)\n\n for i in range(self.num_upsamples):\n for i_up in range(len(self.ups[i])):\n x = self.ups[i][i_up](x)\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i * self.num_kernels + j](x)\n else:\n xs += self.resblocks[i * self.num_kernels + j](x)\n x = xs / self.num_kernels\n\n x = self.activation_post(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n\n return x\n\n def remove_weight_norm(self):\n print(\"Removing weight norm...\")\n for l in self.ups:\n for l_i in l:\n remove_weight_norm(l_i)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)"
},
{
"identifier": "HiFiGAN",
"path": "models/vocoders/gan/generator/hifigan.py",
"snippet": "class HiFiGAN(torch.nn.Module):\n def __init__(self, cfg):\n super(HiFiGAN, self).__init__()\n self.cfg = cfg\n self.num_kernels = len(self.cfg.model.hifigan.resblock_kernel_sizes)\n self.num_upsamples = len(self.cfg.model.hifigan.upsample_rates)\n self.conv_pre = weight_norm(\n Conv1d(\n cfg.preprocess.n_mel,\n self.cfg.model.hifigan.upsample_initial_channel,\n 7,\n 1,\n padding=3,\n )\n )\n resblock = ResBlock1 if self.cfg.model.hifigan.resblock == \"1\" else ResBlock2\n\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(\n zip(\n self.cfg.model.hifigan.upsample_rates,\n self.cfg.model.hifigan.upsample_kernel_sizes,\n )\n ):\n self.ups.append(\n weight_norm(\n ConvTranspose1d(\n self.cfg.model.hifigan.upsample_initial_channel // (2**i),\n self.cfg.model.hifigan.upsample_initial_channel\n // (2 ** (i + 1)),\n k,\n u,\n padding=(k - u) // 2,\n )\n )\n )\n\n self.resblocks = nn.ModuleList()\n for i in range(len(self.ups)):\n ch = self.cfg.model.hifigan.upsample_initial_channel // (2 ** (i + 1))\n for j, (k, d) in enumerate(\n zip(\n self.cfg.model.hifigan.resblock_kernel_sizes,\n self.cfg.model.hifigan.resblock_dilation_sizes,\n )\n ):\n self.resblocks.append(resblock(self.cfg, ch, k, d))\n\n self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n\n def forward(self, x):\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i * self.num_kernels + j](x)\n else:\n xs += self.resblocks[i * self.num_kernels + j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n\n return x\n\n def remove_weight_norm(self):\n print(\"Removing weight norm...\")\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)"
},
{
"identifier": "NSFHiFiGAN",
"path": "models/vocoders/gan/generator/nsfhifigan.py",
"snippet": "class NSFHiFiGAN(nn.Module):\n def __init__(self, cfg):\n super(NSFHiFiGAN, self).__init__()\n\n self.cfg = cfg\n self.num_kernels = len(self.cfg.model.nsfhifigan.resblock_kernel_sizes)\n self.num_upsamples = len(self.cfg.model.nsfhifigan.upsample_rates)\n self.m_source = SourceModuleHnNSF(\n fs=self.cfg.preprocess.sample_rate,\n harmonic_num=self.cfg.model.nsfhifigan.harmonic_num,\n )\n self.noise_convs = nn.ModuleList()\n self.conv_pre = weight_norm(\n Conv1d(\n self.cfg.preprocess.n_mel,\n self.cfg.model.nsfhifigan.upsample_initial_channel,\n 7,\n 1,\n padding=3,\n )\n )\n\n resblock = ResBlock1 if self.cfg.model.nsfhifigan.resblock == \"1\" else ResBlock2\n\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(\n zip(\n self.cfg.model.nsfhifigan.upsample_rates,\n self.cfg.model.nsfhifigan.upsample_kernel_sizes,\n )\n ):\n c_cur = self.cfg.model.nsfhifigan.upsample_initial_channel // (2 ** (i + 1))\n self.ups.append(\n weight_norm(\n ConvTranspose1d(\n self.cfg.model.nsfhifigan.upsample_initial_channel // (2**i),\n self.cfg.model.nsfhifigan.upsample_initial_channel\n // (2 ** (i + 1)),\n k,\n u,\n padding=(k - u) // 2,\n )\n )\n )\n if i + 1 < len(self.cfg.model.nsfhifigan.upsample_rates):\n stride_f0 = int(\n np.prod(self.cfg.model.nsfhifigan.upsample_rates[i + 1 :])\n )\n self.noise_convs.append(\n Conv1d(\n 1,\n c_cur,\n kernel_size=stride_f0 * 2,\n stride=stride_f0,\n padding=stride_f0 // 2,\n )\n )\n else:\n self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))\n\n self.resblocks = nn.ModuleList()\n ch = self.cfg.model.nsfhifigan.upsample_initial_channel\n for i in range(len(self.ups)):\n ch //= 2\n for j, (k, d) in enumerate(\n zip(\n self.cfg.model.nsfhifigan.resblock_kernel_sizes,\n self.cfg.model.nsfhifigan.resblock_dilation_sizes,\n )\n ):\n self.resblocks.append(resblock(cfg, ch, k, d))\n\n self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))\n\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n self.upp = int(np.prod(self.cfg.model.nsfhifigan.upsample_rates))\n\n def forward(self, x, f0):\n har_source = self.m_source(f0, self.upp).transpose(1, 2)\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n x_source = self.noise_convs[i](har_source)\n\n length = min(x.shape[-1], x_source.shape[-1])\n x = x[:, :, :length]\n x_source = x[:, :, :length]\n\n x = x + x_source\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i * self.num_kernels + j](x)\n else:\n xs += self.resblocks[i * self.num_kernels + j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n\n return x"
},
{
"identifier": "MelGAN",
"path": "models/vocoders/gan/generator/melgan.py",
"snippet": "class MelGAN(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n\n self.cfg = cfg\n\n self.hop_length = np.prod(self.cfg.model.melgan.ratios)\n mult = int(2 ** len(self.cfg.model.melgan.ratios))\n\n model = [\n nn.ReflectionPad1d(3),\n WNConv1d(\n self.cfg.preprocess.n_mel,\n mult * self.cfg.model.melgan.ngf,\n kernel_size=7,\n padding=0,\n ),\n ]\n\n # Upsample to raw audio scale\n for i, r in enumerate(self.cfg.model.melgan.ratios):\n model += [\n nn.LeakyReLU(0.2),\n WNConvTranspose1d(\n mult * self.cfg.model.melgan.ngf,\n mult * self.cfg.model.melgan.ngf // 2,\n kernel_size=r * 2,\n stride=r,\n padding=r // 2 + r % 2,\n output_padding=r % 2,\n ),\n ]\n\n for j in range(self.cfg.model.melgan.n_residual_layers):\n model += [\n ResnetBlock(mult * self.cfg.model.melgan.ngf // 2, dilation=3**j)\n ]\n\n mult //= 2\n\n model += [\n nn.LeakyReLU(0.2),\n nn.ReflectionPad1d(3),\n WNConv1d(self.cfg.model.melgan.ngf, 1, kernel_size=7, padding=0),\n nn.Tanh(),\n ]\n\n self.model = nn.Sequential(*model)\n self.apply(weights_init)\n\n def forward(self, x):\n return self.model(x)"
},
{
"identifier": "APNet",
"path": "models/vocoders/gan/generator/apnet.py",
"snippet": "class APNet(torch.nn.Module):\n def __init__(self, cfg):\n super(APNet, self).__init__()\n self.cfg = cfg\n self.ASP_num_kernels = len(cfg.model.apnet.ASP_resblock_kernel_sizes)\n self.PSP_num_kernels = len(cfg.model.apnet.PSP_resblock_kernel_sizes)\n\n self.ASP_input_conv = weight_norm(\n Conv1d(\n cfg.preprocess.n_mel,\n cfg.model.apnet.ASP_channel,\n cfg.model.apnet.ASP_input_conv_kernel_size,\n 1,\n padding=get_padding(cfg.model.apnet.ASP_input_conv_kernel_size, 1),\n )\n )\n self.PSP_input_conv = weight_norm(\n Conv1d(\n cfg.preprocess.n_mel,\n cfg.model.apnet.PSP_channel,\n cfg.model.apnet.PSP_input_conv_kernel_size,\n 1,\n padding=get_padding(cfg.model.apnet.PSP_input_conv_kernel_size, 1),\n )\n )\n\n self.ASP_ResNet = nn.ModuleList()\n for j, (k, d) in enumerate(\n zip(\n cfg.model.apnet.ASP_resblock_kernel_sizes,\n cfg.model.apnet.ASP_resblock_dilation_sizes,\n )\n ):\n self.ASP_ResNet.append(ASPResBlock(cfg, cfg.model.apnet.ASP_channel, k, d))\n\n self.PSP_ResNet = nn.ModuleList()\n for j, (k, d) in enumerate(\n zip(\n cfg.model.apnet.PSP_resblock_kernel_sizes,\n cfg.model.apnet.PSP_resblock_dilation_sizes,\n )\n ):\n self.PSP_ResNet.append(PSPResBlock(cfg, cfg.model.apnet.PSP_channel, k, d))\n\n self.ASP_output_conv = weight_norm(\n Conv1d(\n cfg.model.apnet.ASP_channel,\n cfg.preprocess.n_fft // 2 + 1,\n cfg.model.apnet.ASP_output_conv_kernel_size,\n 1,\n padding=get_padding(cfg.model.apnet.ASP_output_conv_kernel_size, 1),\n )\n )\n self.PSP_output_R_conv = weight_norm(\n Conv1d(\n cfg.model.apnet.PSP_channel,\n cfg.preprocess.n_fft // 2 + 1,\n cfg.model.apnet.PSP_output_R_conv_kernel_size,\n 1,\n padding=get_padding(cfg.model.apnet.PSP_output_R_conv_kernel_size, 1),\n )\n )\n self.PSP_output_I_conv = weight_norm(\n Conv1d(\n cfg.model.apnet.PSP_channel,\n cfg.preprocess.n_fft // 2 + 1,\n cfg.model.apnet.PSP_output_I_conv_kernel_size,\n 1,\n padding=get_padding(cfg.model.apnet.PSP_output_I_conv_kernel_size, 1),\n )\n )\n\n self.iSTFT = ISTFT(\n self.cfg.preprocess.n_fft,\n hop_length=self.cfg.preprocess.hop_size,\n win_length=self.cfg.preprocess.win_size,\n )\n\n self.ASP_output_conv.apply(init_weights)\n self.PSP_output_R_conv.apply(init_weights)\n self.PSP_output_I_conv.apply(init_weights)\n\n def forward(self, mel):\n logamp = self.ASP_input_conv(mel)\n logamps = None\n for j in range(self.ASP_num_kernels):\n if logamps is None:\n logamps = self.ASP_ResNet[j](logamp)\n else:\n logamps += self.ASP_ResNet[j](logamp)\n logamp = logamps / self.ASP_num_kernels\n logamp = F.leaky_relu(logamp)\n logamp = self.ASP_output_conv(logamp)\n\n pha = self.PSP_input_conv(mel)\n phas = None\n for j in range(self.PSP_num_kernels):\n if phas is None:\n phas = self.PSP_ResNet[j](pha)\n else:\n phas += self.PSP_ResNet[j](pha)\n pha = phas / self.PSP_num_kernels\n pha = F.leaky_relu(pha)\n R = self.PSP_output_R_conv(pha)\n I = self.PSP_output_I_conv(pha)\n\n pha = torch.atan2(I, R)\n\n rea = torch.exp(logamp) * torch.cos(pha)\n imag = torch.exp(logamp) * torch.sin(pha)\n\n spec = torch.cat((rea.unsqueeze(-1), imag.unsqueeze(-1)), -1)\n\n spec = torch.view_as_complex(spec)\n\n audio = self.iSTFT.forward(\n spec, torch.hann_window(self.cfg.preprocess.win_size).to(mel.device)\n )\n\n return logamp, pha, rea, imag, audio.unsqueeze(1)"
},
{
"identifier": "ConditionEncoder",
"path": "modules/encoder/condition_encoder.py",
"snippet": "class ConditionEncoder(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n\n self.merge_mode = cfg.merge_mode\n\n if cfg.use_whisper:\n self.whisper_encoder = ContentEncoder(\n self.cfg, self.cfg.whisper_dim, self.cfg.content_encoder_dim\n )\n\n if cfg.use_contentvec:\n self.contentvec_encoder = ContentEncoder(\n self.cfg, self.cfg.contentvec_dim, self.cfg.content_encoder_dim\n )\n\n if cfg.use_mert:\n self.mert_encoder = ContentEncoder(\n self.cfg, self.cfg.mert_dim, self.cfg.content_encoder_dim\n )\n\n if cfg.use_wenet:\n self.wenet_encoder = ContentEncoder(\n self.cfg, self.cfg.wenet_dim, self.cfg.content_encoder_dim\n )\n\n self.melody_encoder = MelodyEncoder(self.cfg)\n self.loudness_encoder = LoudnessEncoder(self.cfg)\n if cfg.use_spkid:\n self.singer_encoder = SingerEncoder(self.cfg)\n\n def forward(self, x):\n outputs = []\n\n if \"frame_pitch\" in x.keys():\n if \"frame_uv\" not in x.keys():\n x[\"frame_uv\"] = None\n pitch_enc_out = self.melody_encoder(\n x[\"frame_pitch\"], uv=x[\"frame_uv\"], length=x[\"target_len\"]\n )\n outputs.append(pitch_enc_out)\n\n if \"frame_energy\" in x.keys():\n loudness_enc_out = self.loudness_encoder(x[\"frame_energy\"])\n outputs.append(loudness_enc_out)\n\n if \"whisper_feat\" in x.keys():\n # whisper_feat: [b, T, 1024]\n whiser_enc_out = self.whisper_encoder(\n x[\"whisper_feat\"], length=x[\"target_len\"]\n )\n outputs.append(whiser_enc_out)\n seq_len = whiser_enc_out.shape[1]\n\n if \"contentvec_feat\" in x.keys():\n contentvec_enc_out = self.contentvec_encoder(\n x[\"contentvec_feat\"], length=x[\"target_len\"]\n )\n outputs.append(contentvec_enc_out)\n seq_len = contentvec_enc_out.shape[1]\n\n if \"mert_feat\" in x.keys():\n mert_enc_out = self.mert_encoder(x[\"mert_feat\"], length=x[\"target_len\"])\n outputs.append(mert_enc_out)\n seq_len = mert_enc_out.shape[1]\n\n if \"wenet_feat\" in x.keys():\n wenet_enc_out = self.wenet_encoder(x[\"wenet_feat\"], length=x[\"target_len\"])\n outputs.append(wenet_enc_out)\n seq_len = wenet_enc_out.shape[1]\n\n if \"spk_id\" in x.keys():\n speaker_enc_out = self.singer_encoder(x[\"spk_id\"]) # [b, 1, 384]\n assert (\n \"whisper_feat\" in x.keys()\n or \"contentvec_feat\" in x.keys()\n or \"mert_feat\" in x.keys()\n or \"wenet_feat\" in x.keys()\n )\n singer_info = speaker_enc_out.expand(-1, seq_len, -1)\n outputs.append(singer_info)\n\n encoder_output = None\n if self.merge_mode == \"concat\":\n encoder_output = torch.cat(outputs, dim=-1)\n if self.merge_mode == \"add\":\n # (#modules, N, seq_len, output_dim)\n outputs = torch.cat([out[None, :, :, :] for out in outputs], dim=0)\n # (N, seq_len, output_dim)\n encoder_output = torch.sum(outputs, dim=0)\n\n return encoder_output"
}
] | import copy
import torch
from torch import nn
from torch.nn import functional as F
from utils.util import *
from utils.f0 import f0_to_coarse
from modules.transformer.attentions import Encoder
from models.tts.vits.vits import ResidualCouplingBlock, PosteriorEncoder
from models.vocoders.gan.generator.bigvgan import BigVGAN
from models.vocoders.gan.generator.hifigan import HiFiGAN
from models.vocoders.gan.generator.nsfhifigan import NSFHiFiGAN
from models.vocoders.gan.generator.melgan import MelGAN
from models.vocoders.gan.generator.apnet import APNet
from modules.encoder.condition_encoder import ConditionEncoder | 7,993 | for i in range(x.size(0)):
idx_str = ids_str[i]
idx_end = idx_str + segment_size
ret[i] = x[i, idx_str:idx_end]
return ret
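# pick one random window per batch item and slice latents and pitch with the same offsets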
def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
b, d, t = x.size()
if x_lengths is None:
x_lengths = t
ids_str_max = x_lengths - segment_size + 1
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
ret = slice_segments(x, ids_str, segment_size)
ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size)
return ret, ret_pitch, ids_str
class ContentEncoder(nn.Module):
def __init__(
self,
out_channels,
hidden_channels,
kernel_size,
n_layers,
gin_channels=0,
filter_channels=None,
n_heads=None,
p_dropout=None,
):
super().__init__()
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.gin_channels = gin_channels
self.f0_emb = nn.Embedding(256, hidden_channels)
self.enc_ = Encoder(
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
# condition_encoder ver.
def forward(self, x, x_mask, noice_scale=1):
x = self.enc_(x * x_mask, x_mask)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs) * noice_scale) * x_mask
return z, m, logs, x_mask
class SynthesizerTrn(nn.Module):
"""
Synthesizer for Training
"""
def __init__(self, spec_channels, segment_size, cfg):
super().__init__()
self.spec_channels = spec_channels
self.segment_size = segment_size
self.cfg = cfg
self.inter_channels = cfg.model.vits.inter_channels
self.hidden_channels = cfg.model.vits.hidden_channels
self.filter_channels = cfg.model.vits.filter_channels
self.n_heads = cfg.model.vits.n_heads
self.n_layers = cfg.model.vits.n_layers
self.kernel_size = cfg.model.vits.kernel_size
self.p_dropout = cfg.model.vits.p_dropout
self.ssl_dim = cfg.model.vits.ssl_dim
self.n_flow_layer = cfg.model.vits.n_flow_layer
self.gin_channels = cfg.model.vits.gin_channels
self.n_speakers = cfg.model.vits.n_speakers
# f0
self.n_bins = cfg.preprocess.pitch_bin
self.f0_min = cfg.preprocess.f0_min
self.f0_max = cfg.preprocess.f0_max
# TODO: sort out the config
self.cfg.model.condition_encoder.f0_min = self.cfg.preprocess.f0_min
self.cfg.model.condition_encoder.f0_max = self.cfg.preprocess.f0_max
self.condition_encoder = ConditionEncoder(self.cfg.model.condition_encoder)
self.emb_g = nn.Embedding(self.n_speakers, self.gin_channels)
self.enc_p = ContentEncoder(
self.inter_channels,
self.hidden_channels,
filter_channels=self.filter_channels,
n_heads=self.n_heads,
n_layers=self.n_layers,
kernel_size=self.kernel_size,
p_dropout=self.p_dropout,
)
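        # decoder: instantiate the vocoder generator named in the config (checked below)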
assert cfg.model.generator in [
"bigvgan",
"hifigan",
"melgan",
"nsfhifigan",
"apnet",
]
self.dec_name = cfg.model.generator
temp_cfg = copy.deepcopy(cfg)
temp_cfg.preprocess.n_mel = self.inter_channels
if cfg.model.generator == "bigvgan":
temp_cfg.model.bigvgan = cfg.model.generator_config.bigvgan
self.dec = BigVGAN(temp_cfg)
elif cfg.model.generator == "hifigan":
temp_cfg.model.hifigan = cfg.model.generator_config.hifigan
self.dec = HiFiGAN(temp_cfg)
elif cfg.model.generator == "melgan":
temp_cfg.model.melgan = cfg.model.generator_config.melgan
self.dec = MelGAN(temp_cfg)
elif cfg.model.generator == "nsfhifigan":
temp_cfg.model.nsfhifigan = cfg.model.generator_config.nsfhifigan
| # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This code is modified from https://github.com/svc-develop-team/so-vits-svc/blob/4.1-Stable/models.py
def slice_pitch_segments(x, ids_str, segment_size=4):
ret = torch.zeros_like(x[:, :segment_size])
for i in range(x.size(0)):
idx_str = ids_str[i]
idx_end = idx_str + segment_size
ret[i] = x[i, idx_str:idx_end]
return ret
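# pick one random window per batch item and slice latents and pitch with the same offsets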
def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
b, d, t = x.size()
if x_lengths is None:
x_lengths = t
ids_str_max = x_lengths - segment_size + 1
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
ret = slice_segments(x, ids_str, segment_size)
ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size)
return ret, ret_pitch, ids_str
class ContentEncoder(nn.Module):
def __init__(
self,
out_channels,
hidden_channels,
kernel_size,
n_layers,
gin_channels=0,
filter_channels=None,
n_heads=None,
p_dropout=None,
):
super().__init__()
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.gin_channels = gin_channels
self.f0_emb = nn.Embedding(256, hidden_channels)
self.enc_ = Encoder(
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
# condition_encoder ver.
def forward(self, x, x_mask, noice_scale=1):
x = self.enc_(x * x_mask, x_mask)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs) * noice_scale) * x_mask
return z, m, logs, x_mask
class SynthesizerTrn(nn.Module):
"""
Synthesizer for Training
"""
def __init__(self, spec_channels, segment_size, cfg):
super().__init__()
self.spec_channels = spec_channels
self.segment_size = segment_size
self.cfg = cfg
self.inter_channels = cfg.model.vits.inter_channels
self.hidden_channels = cfg.model.vits.hidden_channels
self.filter_channels = cfg.model.vits.filter_channels
self.n_heads = cfg.model.vits.n_heads
self.n_layers = cfg.model.vits.n_layers
self.kernel_size = cfg.model.vits.kernel_size
self.p_dropout = cfg.model.vits.p_dropout
self.ssl_dim = cfg.model.vits.ssl_dim
self.n_flow_layer = cfg.model.vits.n_flow_layer
self.gin_channels = cfg.model.vits.gin_channels
self.n_speakers = cfg.model.vits.n_speakers
# f0
self.n_bins = cfg.preprocess.pitch_bin
self.f0_min = cfg.preprocess.f0_min
self.f0_max = cfg.preprocess.f0_max
# TODO: sort out the config
self.cfg.model.condition_encoder.f0_min = self.cfg.preprocess.f0_min
self.cfg.model.condition_encoder.f0_max = self.cfg.preprocess.f0_max
self.condition_encoder = ConditionEncoder(self.cfg.model.condition_encoder)
self.emb_g = nn.Embedding(self.n_speakers, self.gin_channels)
self.enc_p = ContentEncoder(
self.inter_channels,
self.hidden_channels,
filter_channels=self.filter_channels,
n_heads=self.n_heads,
n_layers=self.n_layers,
kernel_size=self.kernel_size,
p_dropout=self.p_dropout,
)
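        # decoder: instantiate the vocoder generator named in the config (checked below)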
assert cfg.model.generator in [
"bigvgan",
"hifigan",
"melgan",
"nsfhifigan",
"apnet",
]
self.dec_name = cfg.model.generator
temp_cfg = copy.deepcopy(cfg)
temp_cfg.preprocess.n_mel = self.inter_channels
if cfg.model.generator == "bigvgan":
temp_cfg.model.bigvgan = cfg.model.generator_config.bigvgan
self.dec = BigVGAN(temp_cfg)
elif cfg.model.generator == "hifigan":
temp_cfg.model.hifigan = cfg.model.generator_config.hifigan
self.dec = HiFiGAN(temp_cfg)
elif cfg.model.generator == "melgan":
temp_cfg.model.melgan = cfg.model.generator_config.melgan
self.dec = MelGAN(temp_cfg)
elif cfg.model.generator == "nsfhifigan":
temp_cfg.model.nsfhifigan = cfg.model.generator_config.nsfhifigan | self.dec = NSFHiFiGAN(temp_cfg) # TODO: nsf need f0 | 6 | 2023-11-15 09:19:27+00:00 | 12k |
banodoco/Steerable-Motion | imports/AdvancedControlNet/nodes.py | [
{
"identifier": "load_controlnet",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "def load_controlnet(ckpt_path, timestep_keyframe: TimestepKeyframeGroupImport=None, model=None):\n control = comfy_cn.load_controlnet(ckpt_path, model=model)\n # TODO: support controlnet-lllite\n # if is None, see if is a non-vanilla ControlNet\n # if control is None:\n # controlnet_data = comfy.utils.load_torch_file(ckpt_path, safe_load=True)\n # # check if lllite\n # if \"lllite_unet\" in controlnet_data:\n # pass\n return convert_to_advanced(control, timestep_keyframe=timestep_keyframe)"
},
{
"identifier": "convert_to_advanced",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "def convert_to_advanced(control, timestep_keyframe: TimestepKeyframeGroupImport=None):\n # if already advanced, leave it be\n if is_advanced_controlnet(control):\n return control\n # if exactly ControlNet returned, transform it into ControlNetAdvancedImport\n if type(control) == ControlNet:\n return ControlNetAdvancedImport.from_vanilla(v=control, timestep_keyframe=timestep_keyframe)\n # if exactly ControlLora returned, transform it into ControlLoraAdvancedImport\n elif type(control) == ControlLora:\n return ControlLoraAdvancedImport.from_vanilla(v=control, timestep_keyframe=timestep_keyframe)\n # if T2IAdapter returned, transform it into T2IAdapterAdvancedImport\n elif isinstance(control, T2IAdapter):\n return T2IAdapterAdvancedImport.from_vanilla(v=control, timestep_keyframe=timestep_keyframe)\n # otherwise, leave it be - might be something I am not supporting yet\n return control"
},
{
"identifier": "ControlWeightsImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class ControlWeightsImport:\n def __init__(self, weight_type: str, base_multiplier: float=1.0, flip_weights: bool=False, weights: list[float]=None, weight_mask: Tensor=None):\n self.weight_type = weight_type\n self.base_multiplier = base_multiplier\n self.flip_weights = flip_weights\n self.weights = weights\n if self.weights is not None and self.flip_weights:\n self.weights.reverse()\n self.weight_mask = weight_mask\n\n def get(self, idx: int) -> Union[float, Tensor]:\n # if weights is not none, return index\n if self.weights is not None:\n return self.weights[idx]\n return 1.0\n\n @classmethod\n def default(cls):\n return cls(ControlWeightTypeImport.DEFAULT)\n\n @classmethod\n def universal(cls, base_multiplier: float, flip_weights: bool=False):\n return cls(ControlWeightTypeImport.UNIVERSAL, base_multiplier=base_multiplier, flip_weights=flip_weights)\n \n @classmethod\n def universal_mask(cls, weight_mask: Tensor):\n return cls(ControlWeightTypeImport.UNIVERSAL, weight_mask=weight_mask)\n\n @classmethod\n def t2iadapter(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*12\n return cls(ControlWeightTypeImport.T2IADAPTER, weights=weights,flip_weights=flip_weights)\n\n @classmethod\n def controlnet(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*13\n return cls(ControlWeightTypeImport.CONTROLNET, weights=weights, flip_weights=flip_weights)\n \n @classmethod\n def controllora(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*10\n return cls(ControlWeightTypeImport.CONTROLLORA, weights=weights, flip_weights=flip_weights)\n \n @classmethod\n def controllllite(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n # TODO: make this have a real value\n weights = [1.0]*200\n return cls(ControlWeightTypeImport.CONTROLLLLITE, weights=weights, flip_weights=flip_weights)"
},
{
"identifier": "ControlWeightTypeImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class ControlWeightTypeImport:\n DEFAULT = \"default\"\n UNIVERSAL = \"universal\"\n T2IADAPTER = \"t2iadapter\"\n CONTROLNET = \"controlnet\"\n CONTROLLORA = \"controllora\"\n CONTROLLLLITE = \"controllllite\""
},
{
"identifier": "LatentKeyframeGroupImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class LatentKeyframeGroupImport:\n def __init__(self) -> None:\n self.keyframes: list[LatentKeyframeImport] = []\n\n def add(self, keyframe: LatentKeyframeImport) -> None:\n added = False\n # replace existing keyframe if same batch_index\n for i in range(len(self.keyframes)):\n if self.keyframes[i].batch_index == keyframe.batch_index:\n self.keyframes[i] = keyframe\n added = True\n break\n if not added:\n self.keyframes.append(keyframe)\n self.keyframes.sort(key=lambda k: k.batch_index)\n \n def get_index(self, index: int) -> Union[LatentKeyframeImport, None]:\n try:\n return self.keyframes[index]\n except IndexError:\n return None\n \n def __getitem__(self, index) -> LatentKeyframeImport:\n return self.keyframes[index]\n \n def is_empty(self) -> bool:\n return len(self.keyframes) == 0\n\n def clone(self) -> 'LatentKeyframeGroupImport':\n cloned = LatentKeyframeGroupImport()\n for tk in self.keyframes:\n cloned.add(tk)\n return cloned"
},
{
"identifier": "TimestepKeyframeImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class TimestepKeyframeImport:\n def __init__(self,\n start_percent: float = 0.0,\n strength: float = 1.0,\n interpolation: str = StrengthInterpolationImport.NONE,\n control_weights: ControlWeightsImport = None,\n latent_keyframes: LatentKeyframeGroupImport = None,\n null_latent_kf_strength: float = 0.0,\n inherit_missing: bool = True,\n guarantee_usage: bool = True,\n mask_hint_orig: Tensor = None) -> None:\n self.start_percent = start_percent\n self.start_t = 999999999.9\n self.strength = strength\n self.interpolation = interpolation\n self.control_weights = control_weights\n self.latent_keyframes = latent_keyframes\n self.null_latent_kf_strength = null_latent_kf_strength\n self.inherit_missing = inherit_missing\n self.guarantee_usage = guarantee_usage\n self.mask_hint_orig = mask_hint_orig\n\n def has_control_weights(self):\n return self.control_weights is not None\n \n def has_latent_keyframes(self):\n return self.latent_keyframes is not None\n \n def has_mask_hint(self):\n return self.mask_hint_orig is not None\n \n \n @classmethod\n def default(cls) -> 'TimestepKeyframeImport':\n return cls(0.0)"
},
{
"identifier": "TimestepKeyframeGroupImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class TimestepKeyframeGroupImport:\n def __init__(self) -> None:\n self.keyframes: list[TimestepKeyframeImport] = []\n self.keyframes.append(TimestepKeyframeImport.default())\n\n def add(self, keyframe: TimestepKeyframeImport) -> None:\n added = False\n # replace existing keyframe if same start_percent\n for i in range(len(self.keyframes)):\n if self.keyframes[i].start_percent == keyframe.start_percent:\n self.keyframes[i] = keyframe\n added = True\n break\n if not added:\n self.keyframes.append(keyframe)\n self.keyframes.sort(key=lambda k: k.start_percent)\n\n def get_index(self, index: int) -> Union[TimestepKeyframeImport, None]:\n try:\n return self.keyframes[index]\n except IndexError:\n return None\n \n def has_index(self, index: int) -> int:\n return index >=0 and index < len(self.keyframes)\n\n def __getitem__(self, index) -> TimestepKeyframeImport:\n return self.keyframes[index]\n \n def __len__(self) -> int:\n return len(self.keyframes)\n\n def is_empty(self) -> bool:\n return len(self.keyframes) == 0\n \n def clone(self) -> 'TimestepKeyframeGroupImport':\n cloned = TimestepKeyframeGroupImport()\n for tk in self.keyframes:\n cloned.add(tk)\n return cloned\n \n @classmethod\n def default(cls, keyframe: TimestepKeyframeImport) -> 'TimestepKeyframeGroupImport':\n group = cls()\n group.keyframes[0] = keyframe\n return group"
},
{
"identifier": "is_advanced_controlnet",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "def is_advanced_controlnet(input_object):\n return hasattr(input_object, \"sub_idxs\")"
},
{
"identifier": "StrengthInterpolationImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class StrengthInterpolationImport:\n LINEAR = \"linear\"\n EASE_IN = \"ease-in\"\n EASE_OUT = \"ease-out\"\n EASE_IN_OUT = \"ease-in-out\"\n NONE = \"none\""
},
{
"identifier": "DefaultWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class DefaultWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights\"\n\n def load_weights(self):\n weights = ControlWeightsImport.default()\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) "
},
{
"identifier": "ScaledSoftMaskedUniversalWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class ScaledSoftMaskedUniversalWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"mask\": (\"MASK\", ),\n \"min_base_multiplier\": (\"FLOAT\", {\"default\": 0.0, \"min\": 0.0, \"max\": 1.0, \"step\": 0.001}, ),\n \"max_base_multiplier\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 1.0, \"step\": 0.001}, ),\n #\"lock_min\": (\"BOOLEAN\", {\"default\": False}, ),\n #\"lock_max\": (\"BOOLEAN\", {\"default\": False}, ),\n },\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights\"\n\n def load_weights(self, mask: Tensor, min_base_multiplier: float, max_base_multiplier: float, lock_min=False, lock_max=False):\n # normalize mask\n mask = mask.clone()\n x_min = 0.0 if lock_min else mask.min()\n x_max = 1.0 if lock_max else mask.max()\n if x_min == x_max:\n mask = torch.ones_like(mask) * max_base_multiplier\n else:\n mask = linear_conversion(mask, x_min, x_max, min_base_multiplier, max_base_multiplier)\n weights = ControlWeightsImport.universal_mask(weight_mask=mask)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights)))"
},
{
"identifier": "ScaledSoftUniversalWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class ScaledSoftUniversalWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"base_multiplier\": (\"FLOAT\", {\"default\": 0.825, \"min\": 0.0, \"max\": 1.0, \"step\": 0.001}, ),\n \"flip_weights\": (\"BOOLEAN\", {\"default\": False}),\n },\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights\"\n\n def load_weights(self, base_multiplier, flip_weights):\n weights = ControlWeightsImport.universal(base_multiplier=base_multiplier, flip_weights=flip_weights)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) "
},
{
"identifier": "SoftControlNetWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class SoftControlNetWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"weight_00\": (\"FLOAT\", {\"default\": 0.09941396206337118, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_01\": (\"FLOAT\", {\"default\": 0.12050177219802567, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_02\": (\"FLOAT\", {\"default\": 0.14606275417942507, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_03\": (\"FLOAT\", {\"default\": 0.17704576264172736, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_04\": (\"FLOAT\", {\"default\": 0.214600924414215, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_05\": (\"FLOAT\", {\"default\": 0.26012233262329093, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_06\": (\"FLOAT\", {\"default\": 0.3152997971191405, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_07\": (\"FLOAT\", {\"default\": 0.3821815722656249, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_08\": (\"FLOAT\", {\"default\": 0.4632503906249999, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_09\": (\"FLOAT\", {\"default\": 0.561515625, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_10\": (\"FLOAT\", {\"default\": 0.6806249999999999, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_11\": (\"FLOAT\", {\"default\": 0.825, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_12\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"flip_weights\": (\"BOOLEAN\", {\"default\": False}),\n },\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights/ControlNet\"\n\n def load_weights(self, weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, \n weight_07, weight_08, weight_09, weight_10, weight_11, weight_12, flip_weights):\n weights = [weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, \n weight_07, weight_08, weight_09, weight_10, weight_11, weight_12]\n weights = ControlWeightsImport.controlnet(weights, flip_weights=flip_weights)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights)))"
},
{
"identifier": "CustomControlNetWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class CustomControlNetWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"weight_00\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_01\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_02\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_03\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_04\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_05\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_06\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_07\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_08\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_09\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_10\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_11\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_12\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"flip_weights\": (\"BOOLEAN\", {\"default\": False}),\n }\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights/ControlNet\"\n\n def load_weights(self, weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, \n weight_07, weight_08, weight_09, weight_10, weight_11, weight_12, flip_weights):\n weights = [weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, \n weight_07, weight_08, weight_09, weight_10, weight_11, weight_12]\n weights = ControlWeightsImport.controlnet(weights, flip_weights=flip_weights)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights)))"
},
{
"identifier": "SoftT2IAdapterWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class SoftT2IAdapterWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"weight_00\": (\"FLOAT\", {\"default\": 0.25, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_01\": (\"FLOAT\", {\"default\": 0.62, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_02\": (\"FLOAT\", {\"default\": 0.825, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_03\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"flip_weights\": (\"BOOLEAN\", {\"default\": False}),\n },\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights/T2IAdapter\"\n\n def load_weights(self, weight_00, weight_01, weight_02, weight_03, flip_weights):\n weights = [weight_00, weight_01, weight_02, weight_03]\n weights = get_properly_arranged_t2i_weights(weights)\n weights = ControlWeightsImport.t2iadapter(weights, flip_weights=flip_weights)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights)))"
},
{
"identifier": "CustomT2IAdapterWeightsImport",
"path": "imports/AdvancedControlNet/weight_nodes.py",
"snippet": "class CustomT2IAdapterWeightsImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"weight_00\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_01\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_02\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"weight_03\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n \"flip_weights\": (\"BOOLEAN\", {\"default\": False}),\n },\n }\n \n RETURN_TYPES = (\"CONTROL_NET_WEIGHTS\", \"TIMESTEP_KEYFRAME\",)\n RETURN_NAMES = WEIGHTS_RETURN_NAMES\n FUNCTION = \"load_weights\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/weights/T2IAdapter\"\n\n def load_weights(self, weight_00, weight_01, weight_02, weight_03, flip_weights):\n weights = [weight_00, weight_01, weight_02, weight_03]\n weights = get_properly_arranged_t2i_weights(weights)\n weights = ControlWeightsImport.t2iadapter(weights, flip_weights=flip_weights)\n return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights)))"
},
{
"identifier": "LatentKeyframeGroupNodeImport",
"path": "imports/AdvancedControlNet/latent_keyframe_nodes.py",
"snippet": "class LatentKeyframeGroupNodeImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"index_strengths\": (\"STRING\", {\"multiline\": True, \"default\": \"\"}),\n },\n \"optional\": {\n \"prev_latent_kf\": (\"LATENT_KEYFRAME\", ),\n \"latent_optional\": (\"LATENT\", ),\n \"print_keyframes\": (\"BOOLEAN\", {\"default\": False})\n }\n }\n \n RETURN_NAMES = (\"LATENT_KF\", )\n RETURN_TYPES = (\"LATENT_KEYFRAME\", )\n FUNCTION = \"load_keyframes\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/keyframes\"\n\n def validate_index(self, index: int, latent_count: int = 0, is_range: bool = False, allow_negative = False) -> int:\n # if part of range, do nothing\n if is_range:\n return index\n # otherwise, validate index\n # validate not out of range - only when latent_count is passed in\n if latent_count > 0 and index > latent_count-1:\n raise IndexError(f\"Index '{index}' out of range for the total {latent_count} latents.\")\n # if negative, validate not out of range\n if index < 0:\n if not allow_negative:\n raise IndexError(f\"Negative indeces not allowed, but was {index}.\")\n conv_index = latent_count+index\n if conv_index < 0:\n raise IndexError(f\"Index '{index}', converted to '{conv_index}' out of range for the total {latent_count} latents.\")\n index = conv_index\n return index\n\n def convert_to_index_int(self, raw_index: str, latent_count: int = 0, is_range: bool = False, allow_negative = False) -> int:\n try:\n return self.validate_index(int(raw_index), latent_count=latent_count, is_range=is_range, allow_negative=allow_negative)\n except ValueError as e:\n raise ValueError(f\"index '{raw_index}' must be an integer.\", e)\n\n def convert_to_latent_keyframes(self, latent_indeces: str, latent_count: int) -> set[LatentKeyframeImport]:\n if not latent_indeces:\n return set()\n int_latent_indeces = [i for i in range(0, latent_count)]\n allow_negative = latent_count > 0\n chosen_indeces = set()\n # parse string - allow positive ints, negative ints, and ranges separated by ':'\n groups = latent_indeces.split(\",\")\n groups = [g.strip() for g in groups]\n for g in groups:\n # parse strengths - default to 1.0 if no strength given\n strength = 1.0\n if '=' in g:\n g, strength_str = g.split(\"=\", 1)\n g = g.strip()\n try:\n strength = float(strength_str.strip())\n except ValueError as e:\n raise ValueError(f\"strength '{strength_str}' must be a float.\", e)\n if strength < 0:\n raise ValueError(f\"Strength '{strength}' cannot be negative.\")\n # parse range of indeces (e.g. 
2:16)\n if ':' in g:\n index_range = g.split(\":\", 1)\n index_range = [r.strip() for r in index_range]\n start_index = self.convert_to_index_int(index_range[0], latent_count=latent_count, is_range=True, allow_negative=allow_negative)\n end_index = self.convert_to_index_int(index_range[1], latent_count=latent_count, is_range=True, allow_negative=allow_negative)\n # if latents were passed in, base indeces on known latent count\n if len(int_latent_indeces) > 0:\n for i in int_latent_indeces[start_index:end_index]:\n chosen_indeces.add(LatentKeyframeImport(i, strength))\n # otherwise, assume indeces are valid\n else:\n for i in range(start_index, end_index):\n chosen_indeces.add(LatentKeyframeImport(i, strength))\n # parse individual indeces\n else:\n chosen_indeces.add(LatentKeyframeImport(self.convert_to_index_int(g, latent_count=latent_count, allow_negative=allow_negative), strength))\n return chosen_indeces\n\n def load_keyframes(self,\n index_strengths: str,\n prev_latent_kf: LatentKeyframeGroupImport=None,\n prev_latent_keyframe: LatentKeyframeGroupImport=None, # old name\n latent_image_opt=None,\n print_keyframes=False):\n prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf\n if not prev_latent_keyframe:\n prev_latent_keyframe = LatentKeyframeGroupImport()\n else:\n prev_latent_keyframe = prev_latent_keyframe.clone()\n curr_latent_keyframe = LatentKeyframeGroupImport()\n\n latent_count = -1\n if latent_image_opt:\n latent_count = latent_image_opt['samples'].size()[0]\n latent_keyframes = self.convert_to_latent_keyframes(index_strengths, latent_count=latent_count)\n\n for latent_keyframe in latent_keyframes:\n curr_latent_keyframe.add(latent_keyframe)\n \n if print_keyframes:\n for keyframe in curr_latent_keyframe.keyframes:\n logger.info(f\"keyframe {keyframe.batch_index}:{keyframe.strength}\")\n\n # replace values with prev_latent_keyframes\n for latent_keyframe in prev_latent_keyframe.keyframes:\n curr_latent_keyframe.add(latent_keyframe)\n\n return (curr_latent_keyframe,)"
},
{
"identifier": "LatentKeyframeInterpolationNodeImport",
"path": "imports/AdvancedControlNet/latent_keyframe_nodes.py",
"snippet": "class LatentKeyframeInterpolationNodeImport:\n \n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"batch_index_from\": (\"INT\", {\"default\": 0, \"min\": -10000, \"max\": 10000, \"step\": 1}),\n \"batch_index_to_excl\": (\"INT\", {\"default\": 0, \"min\": -10000, \"max\": 10000, \"step\": 1}),\n \"strength_from\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.0001}, ),\n \"strength_to\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.0001}, ),\n \"interpolation\": ([\"linear\", \"ease-in\", \"ease-out\", \"ease-in-out\"], ),\n \"revert_direction_at_midpoint\": (\"BOOLEAN\", {\"default\": False}),\n },\n \"optional\": {\n \"prev_latent_keyframe\": (\"LATENT_KEYFRAME\", ),\n }\n }\n\n RETURN_TYPES = (\"LATENT_KEYFRAME\", )\n FUNCTION = \"load_keyframe\"\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/keyframes\"\n\n def load_keyframe(self,\n batch_index_from: int,\n strength_from: float,\n batch_index_to_excl: int,\n strength_to: float,\n interpolation: str,\n revert_direction_at_midpoint: bool=False,\n last_key_frame_position: int=0,\n i=0,\n number_of_items=0,\n buffer=0,\n prev_latent_keyframe: LatentKeyframeGroupImport=None):\n\n\n\n if not prev_latent_keyframe:\n prev_latent_keyframe = LatentKeyframeGroupImport()\n else: \n prev_latent_keyframe = prev_latent_keyframe.clone()\n \n curr_latent_keyframe = LatentKeyframeGroupImport()\n\n weights, frame_numbers = calculate_weights(batch_index_from, batch_index_to_excl, strength_from, strength_to, interpolation, revert_direction_at_midpoint, last_key_frame_position,i,number_of_items, buffer)\n \n for i, frame_number in enumerate(frame_numbers):\n keyframe = LatentKeyframeImport(frame_number, float(weights[i])) \n curr_latent_keyframe.add(keyframe)\n\n for latent_keyframe in prev_latent_keyframe.keyframes:\n curr_latent_keyframe.add(latent_keyframe)\n\n\n return (weights, frame_numbers, curr_latent_keyframe,)"
},
{
"identifier": "LatentKeyframeBatchedGroupNodeImport",
"path": "imports/AdvancedControlNet/latent_keyframe_nodes.py",
"snippet": "class LatentKeyframeBatchedGroupNodeImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"float_strengths\": (\"FLOAT\", {\"default\": -1, \"min\": -1, \"step\": 0.001, \"forceInput\": True}),\n },\n \"optional\": {\n \"prev_latent_kf\": (\"LATENT_KEYFRAME\", ),\n \"print_keyframes\": (\"BOOLEAN\", {\"default\": False})\n }\n }\n\n RETURN_NAMES = (\"LATENT_KF\", )\n RETURN_TYPES = (\"LATENT_KEYFRAME\", )\n FUNCTION = \"load_keyframe\"\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/keyframes\"\n\n def load_keyframe(self, float_strengths: Union[float, list[float]],\n prev_latent_kf: LatentKeyframeGroupImport=None,\n prev_latent_keyframe: LatentKeyframeGroupImport=None, # old name\n print_keyframes=False):\n prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf\n if not prev_latent_keyframe:\n prev_latent_keyframe = LatentKeyframeGroupImport()\n else:\n prev_latent_keyframe = prev_latent_keyframe.clone()\n curr_latent_keyframe = LatentKeyframeGroupImport()\n\n # if received a normal float input, do nothing\n if type(float_strengths) in (float, int):\n logger.info(\"No batched float_strengths passed into Latent Keyframe Batch Group node; will not create any new keyframes.\")\n # if iterable, attempt to create LatentKeyframes with chosen strengths\n elif isinstance(float_strengths, Iterable):\n for idx, strength in enumerate(float_strengths):\n keyframe = LatentKeyframeImport(idx, strength)\n curr_latent_keyframe.add(keyframe)\n else:\n raise ValueError(f\"Expected strengths to be an iterable input, but was {type(float_strengths).__repr__}.\") \n\n if print_keyframes:\n for keyframe in curr_latent_keyframe.keyframes:\n logger.info(f\"keyframe {keyframe.batch_index}:{keyframe.strength}\")\n\n # replace values with prev_latent_keyframes\n for latent_keyframe in prev_latent_keyframe.keyframes:\n curr_latent_keyframe.add(latent_keyframe)\n\n return (curr_latent_keyframe,)"
},
{
"identifier": "LatentKeyframeNodeImport",
"path": "imports/AdvancedControlNet/latent_keyframe_nodes.py",
"snippet": "class LatentKeyframeNodeImport:\n @classmethod\n def INPUT_TYPES(s):\n return {\n \"required\": {\n \"batch_index\": (\"INT\", {\"default\": 0, \"min\": -1000, \"max\": 1000, \"step\": 1}),\n \"strength\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 10.0, \"step\": 0.001}, ),\n },\n \"optional\": {\n \"prev_latent_kf\": (\"LATENT_KEYFRAME\", ),\n }\n }\n\n RETURN_NAMES = (\"LATENT_KF\", )\n RETURN_TYPES = (\"LATENT_KEYFRAME\", )\n FUNCTION = \"load_keyframe\"\n\n CATEGORY = \"Adv-ControlNet 🛂🅐🅒🅝/keyframes\"\n\n def load_keyframe(self,\n batch_index: int,\n strength: float,\n prev_latent_kf: LatentKeyframeGroupImport=None,\n prev_latent_keyframe: LatentKeyframeGroupImport=None, # old name\n ):\n prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf\n if not prev_latent_keyframe:\n prev_latent_keyframe = LatentKeyframeGroupImport()\n else:\n prev_latent_keyframe = prev_latent_keyframe.clone()\n keyframe = LatentKeyframeImport(batch_index, strength)\n prev_latent_keyframe.add(keyframe)\n return (prev_latent_keyframe,)"
},
{
"identifier": "logger",
"path": "imports/AdvancedControlNet/logger.py",
"snippet": "class ColoredFormatter(logging.Formatter):\n COLORS = {\n \"DEBUG\": \"\\033[0;36m\", # CYAN\n \"INFO\": \"\\033[0;32m\", # GREEN\n \"WARNING\": \"\\033[0;33m\", # YELLOW\n \"ERROR\": \"\\033[0;31m\", # RED\n \"CRITICAL\": \"\\033[0;37;41m\", # WHITE ON RED\n \"RESET\": \"\\033[0m\", # RESET COLOR\n }\n def format(self, record):"
}
] | import numpy as np
import folder_paths
from torch import Tensor
from .control import load_controlnet, convert_to_advanced, ControlWeightsImport, ControlWeightTypeImport,\
LatentKeyframeGroupImport, TimestepKeyframeImport, TimestepKeyframeGroupImport, is_advanced_controlnet
from .control import StrengthInterpolationImport as SI
from .weight_nodes import DefaultWeightsImport, ScaledSoftMaskedUniversalWeightsImport, ScaledSoftUniversalWeightsImport, SoftControlNetWeightsImport, CustomControlNetWeightsImport, \
SoftT2IAdapterWeightsImport, CustomT2IAdapterWeightsImport
from .latent_keyframe_nodes import LatentKeyframeGroupNodeImport, LatentKeyframeInterpolationNodeImport, LatentKeyframeBatchedGroupNodeImport, LatentKeyframeNodeImport
from .logger import logger | 10,365 | prev_timestep_kf: TimestepKeyframeGroupImport=None, prev_timestep_keyframe: TimestepKeyframeGroupImport=None, # old name
null_latent_kf_strength: float=0.0,
inherit_missing=True,
guarantee_usage=True,
mask_optional=None,
interpolation: str=SI.NONE,):
control_net_weights = control_net_weights if control_net_weights else cn_weights
prev_timestep_keyframe = prev_timestep_keyframe if prev_timestep_keyframe else prev_timestep_kf
if not prev_timestep_keyframe:
prev_timestep_keyframe = TimestepKeyframeGroupImport()
else:
prev_timestep_keyframe = prev_timestep_keyframe.clone()
keyframe = TimestepKeyframeImport(start_percent=start_percent, strength=strength, interpolation=interpolation, null_latent_kf_strength=null_latent_kf_strength,
control_weights=control_net_weights, latent_keyframes=latent_keyframe, inherit_missing=inherit_missing, guarantee_usage=guarantee_usage,
mask_hint_orig=mask_optional)
prev_timestep_keyframe.add(keyframe)
return (prev_timestep_keyframe,)
class ControlNetLoaderAdvancedImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"control_net_name": (folder_paths.get_filename_list("controlnet"), ),
},
"optional": {
"timestep_keyframe": ("TIMESTEP_KEYFRAME", ),
}
}
RETURN_TYPES = ("CONTROL_NET", )
FUNCTION = "load_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def load_controlnet(self, control_net_name,
timestep_keyframe: TimestepKeyframeGroupImport=None
):
controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
controlnet = load_controlnet(controlnet_path, timestep_keyframe)
return (controlnet,)
class DiffControlNetLoaderAdvancedImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL",),
"control_net_name": (folder_paths.get_filename_list("controlnet"), )
},
"optional": {
"timestep_keyframe": ("TIMESTEP_KEYFRAME", ),
}
}
RETURN_TYPES = ("CONTROL_NET", )
FUNCTION = "load_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def load_controlnet(self, control_net_name, model,
timestep_keyframe: TimestepKeyframeGroupImport=None
):
controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
controlnet = load_controlnet(controlnet_path, timestep_keyframe, model)
if is_advanced_controlnet(controlnet):
controlnet.verify_all_weights()
return (controlnet,)
class AdvancedControlNetApplyImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"positive": ("CONDITIONING", ),
"negative": ("CONDITIONING", ),
"control_net": ("CONTROL_NET", ),
"image": ("IMAGE", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
},
"optional": {
"mask_optional": ("MASK", ),
"timestep_kf": ("TIMESTEP_KEYFRAME", ),
"latent_kf_override": ("LATENT_KEYFRAME", ),
"weights_override": ("CONTROL_NET_WEIGHTS", ),
}
}
RETURN_TYPES = ("CONDITIONING","CONDITIONING")
RETURN_NAMES = ("positive", "negative")
FUNCTION = "apply_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent,
mask_optional: Tensor=None,
timestep_kf: TimestepKeyframeGroupImport=None, latent_kf_override: LatentKeyframeGroupImport=None,
weights_override: ControlWeightsImport=None):
if strength == 0:
return (positive, negative)
control_hint = image.movedim(-1,1)
cnets = {}
out = []
for conditioning in [positive, negative]:
c = []
for t in conditioning:
d = t[1].copy()
prev_cnet = d.get('control', None)
if prev_cnet in cnets:
c_net = cnets[prev_cnet]
else:
# copy, convert to advanced if needed, and set cond
|
class TimestepKeyframeNodeImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ),
},
"optional": {
"prev_timestep_kf": ("TIMESTEP_KEYFRAME", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"cn_weights": ("CONTROL_NET_WEIGHTS", ),
"latent_keyframe": ("LATENT_KEYFRAME", ),
"null_latent_kf_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"inherit_missing": ("BOOLEAN", {"default": True}, ),
"guarantee_usage": ("BOOLEAN", {"default": True}, ),
"mask_optional": ("MASK", ),
#"interpolation": ([SI.LINEAR, SI.EASE_IN, SI.EASE_OUT, SI.EASE_IN_OUT, SI.NONE], {"default": SI.NONE}, ),
}
}
RETURN_NAMES = ("TIMESTEP_KF", )
RETURN_TYPES = ("TIMESTEP_KEYFRAME", )
FUNCTION = "load_keyframe"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes"
def load_keyframe(self,
start_percent: float,
strength: float=1.0,
cn_weights: ControlWeightsImport=None, control_net_weights: ControlWeightsImport=None, # old name
latent_keyframe: LatentKeyframeGroupImport=None,
prev_timestep_kf: TimestepKeyframeGroupImport=None, prev_timestep_keyframe: TimestepKeyframeGroupImport=None, # old name
null_latent_kf_strength: float=0.0,
inherit_missing=True,
guarantee_usage=True,
mask_optional=None,
interpolation: str=SI.NONE,):
control_net_weights = control_net_weights if control_net_weights else cn_weights
prev_timestep_keyframe = prev_timestep_keyframe if prev_timestep_keyframe else prev_timestep_kf
if not prev_timestep_keyframe:
prev_timestep_keyframe = TimestepKeyframeGroupImport()
else:
prev_timestep_keyframe = prev_timestep_keyframe.clone()
keyframe = TimestepKeyframeImport(start_percent=start_percent, strength=strength, interpolation=interpolation, null_latent_kf_strength=null_latent_kf_strength,
control_weights=control_net_weights, latent_keyframes=latent_keyframe, inherit_missing=inherit_missing, guarantee_usage=guarantee_usage,
mask_hint_orig=mask_optional)
prev_timestep_keyframe.add(keyframe)
return (prev_timestep_keyframe,)
class ControlNetLoaderAdvancedImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"control_net_name": (folder_paths.get_filename_list("controlnet"), ),
},
"optional": {
"timestep_keyframe": ("TIMESTEP_KEYFRAME", ),
}
}
RETURN_TYPES = ("CONTROL_NET", )
FUNCTION = "load_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def load_controlnet(self, control_net_name,
timestep_keyframe: TimestepKeyframeGroupImport=None
):
controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
controlnet = load_controlnet(controlnet_path, timestep_keyframe)
return (controlnet,)
class DiffControlNetLoaderAdvancedImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL",),
"control_net_name": (folder_paths.get_filename_list("controlnet"), )
},
"optional": {
"timestep_keyframe": ("TIMESTEP_KEYFRAME", ),
}
}
RETURN_TYPES = ("CONTROL_NET", )
FUNCTION = "load_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def load_controlnet(self, control_net_name, model,
timestep_keyframe: TimestepKeyframeGroupImport=None
):
controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
controlnet = load_controlnet(controlnet_path, timestep_keyframe, model)
if is_advanced_controlnet(controlnet):
controlnet.verify_all_weights()
return (controlnet,)
class AdvancedControlNetApplyImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"positive": ("CONDITIONING", ),
"negative": ("CONDITIONING", ),
"control_net": ("CONTROL_NET", ),
"image": ("IMAGE", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
},
"optional": {
"mask_optional": ("MASK", ),
"timestep_kf": ("TIMESTEP_KEYFRAME", ),
"latent_kf_override": ("LATENT_KEYFRAME", ),
"weights_override": ("CONTROL_NET_WEIGHTS", ),
}
}
RETURN_TYPES = ("CONDITIONING","CONDITIONING")
RETURN_NAMES = ("positive", "negative")
FUNCTION = "apply_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent,
mask_optional: Tensor=None,
timestep_kf: TimestepKeyframeGroupImport=None, latent_kf_override: LatentKeyframeGroupImport=None,
weights_override: ControlWeightsImport=None):
if strength == 0:
return (positive, negative)
control_hint = image.movedim(-1,1)
cnets = {}
out = []
for conditioning in [positive, negative]:
c = []
for t in conditioning:
d = t[1].copy()
prev_cnet = d.get('control', None)
if prev_cnet in cnets:
c_net = cnets[prev_cnet]
else:
# copy, convert to advanced if needed, and set cond | c_net = convert_to_advanced(control_net.copy()).set_cond_hint(control_hint, strength, (start_percent, end_percent)) | 1 | 2023-11-11 01:26:26+00:00 | 12k |
Zaloog/kanban-python | src/kanban_python/controls.py | [
{
"identifier": "cfg",
"path": "src/kanban_python/config.py",
"snippet": "class KanbanConfig:\n def __init__(self, path=CONFIG_FILE_PATH) -> None:\n def __repr__(self) -> str:\n def save(self):\n def config(self) -> configparser.ConfigParser:\n def active_board(self) -> str:\n def active_board(self, new_board):\n def kanban_boards(self) -> list:\n def kanban_boards_dict(self) -> dict:\n def kanban_boards_dict(self, board_name: str) -> dict:\n def active_board_path(self) -> str:\n def show_footer(self):\n def show_footer(self, visible):\n def col_min_width(self) -> int:\n def col_min_width(self, new_width: int) -> None:\n def kanban_columns_dict(self) -> dict:\n def kanban_columns_dict(self, updated_dict) -> dict:\n def vis_cols(self) -> list:\n def done_limit(self) -> int:\n def done_limit(self, new_limit: int) -> None:\n def scanned_files(self) -> list:\n def scanned_files(self, new_files_to_scan: str) -> None:\n def scanned_patterns(self) -> list:\n def scanned_patterns(self, new_patterns_to_scan: str) -> None:\ndef create_init_config(conf_path=CONFIG_PATH, data_path=DATA_PATH):\ndef delete_current_folder_board_from_config(\n cfg=cfg, curr_path: str = str(Path.cwd())\n) -> None:\ndef check_if_board_name_exists_in_config(boardname: str, cfg=cfg) -> bool:\ndef check_if_current_active_board_in_board_list(cfg=cfg) -> bool:\ndef delete_board_from_config(board_name, cfg=cfg) -> None:\ndef check_config_exists(path=CONFIG_FILE_PATH) -> bool:\ndef get_json_path(boardname: str):"
},
{
"identifier": "DUMMY_DB",
"path": "src/kanban_python/constants.py",
"snippet": "DUMMY_DB = {1: DUMMY_TASK}"
},
{
"identifier": "KANBAN_BOARDS_PATH",
"path": "src/kanban_python/constants.py",
"snippet": "KANBAN_BOARDS_PATH = DATA_PATH / KANBAN_BOARDS_FOLDER_NAME"
},
{
"identifier": "REPORT_FILE_NAME",
"path": "src/kanban_python/constants.py",
"snippet": "REPORT_FILE_NAME = \"pykanban.md\""
},
{
"identifier": "REPORT_FILE_PATH",
"path": "src/kanban_python/constants.py",
"snippet": "REPORT_FILE_PATH = DATA_PATH / REPORTS_FOLDER_NAME"
},
{
"identifier": "TASK_FILE_NAME",
"path": "src/kanban_python/constants.py",
"snippet": "TASK_FILE_NAME = \"pykanban.json\""
},
{
"identifier": "create_config_table",
"path": "src/kanban_python/interface.py",
"snippet": "def create_config_table():\n settings_table = Table(\n title=\":hammer_and_wrench: [grey69]Settings Overview[/]:hammer_and_wrench:\",\n highlight=True,\n show_header=True,\n caption=f\"Your config file is located under [light_green]{CONFIG_FILE_PATH}[/]\",\n )\n for col in [\"Option\", \"Current Value\"]:\n settings_table.add_column(\n header=col,\n header_style=\"bold\",\n justify=\"left\",\n overflow=\"fold\",\n min_width=30,\n )\n for section in cfg.config:\n if section:\n settings_table.add_section()\n settings_table.add_row(f\"[blue]{section}[/]\", \"\")\n for key, val in cfg.config[section].items():\n settings_table.add_row(key, val)\n\n return settings_table"
},
{
"identifier": "create_github_like_report_table",
"path": "src/kanban_python/interface.py",
"snippet": "def create_github_like_report_table(boards_dict: dict):\n done_tasks = []\n for _, task_dict in boards_dict.items():\n done_tasks += [task for _, task in task_dict.items() if task[\"Complete_Time\"]]\n\n max_val, report_dict = create_dict_for_report_view(done_tasks)\n current_year = datetime.now().year\n done_tasks_this_year = [\n task\n for task in done_tasks\n if datetime.strptime(task[\"Complete_Time\"], \"%Y-%m-%d %H:%M:%S\").year\n == current_year\n ]\n\n gh_table = Table(\n title=f\"[{REPORT_COLORS[4]}]{len(done_tasks_this_year)}[/] Tasks completed\"\n + f\" in [{REPORT_COLORS[4]}]{current_year}[/]\",\n title_justify=\"left\",\n highlight=True,\n padding=False,\n show_header=True,\n box=None,\n caption=\"\\nless\"\n + \" \".join([f\"[{scale} on {scale}] [/] \" for scale in REPORT_COLORS])\n + \" more\",\n caption_justify=\"right\",\n )\n for work_week in range(0, 53):\n gh_table.add_column(\n header=\"\" if (work_week % 5 or work_week == 0) else f\"{work_week}\",\n header_style=\"bold\",\n justify=\"left\",\n overflow=\"fold\",\n )\n\n for day in range(1, 8):\n day_name = calendar.day_abbr[day - 1] if day % 2 else \"\"\n day_row_vals = [report_dict[day].get(week, 0) for week in range(1, 53)]\n mapped_day_row_vals = create_color_mapping(day_row_vals, max_val=max_val)\n\n gh_table.add_row(\n day_name,\n *[\n f\"[{REPORT_COLORS[i]} on {REPORT_COLORS[i]}] [/]\"\n for i in mapped_day_row_vals\n ],\n )\n\n return gh_table"
},
{
"identifier": "create_table",
"path": "src/kanban_python/interface.py",
"snippet": "def create_table(data: dict) -> Table:\n status_dict = create_status_dict_for_rows(data=data, vis_cols=cfg.vis_cols)\n\n table_name = cfg.active_board\n table = Table(\n title=f\"[blue]Active Board: {table_name}[/]\",\n highlight=True,\n show_header=True,\n show_footer=True if cfg.show_footer == \"True\" else False,\n caption=BOARD_CAPTION_STRING,\n )\n\n for i, category in enumerate([COLOR_DICT.get(col, col) for col in cfg.vis_cols]):\n table.add_column(\n header=category + f\"\\t({len(status_dict[cfg.vis_cols[i]])} Task/s)\",\n header_style=\"bold\",\n justify=\"left\",\n overflow=\"fold\",\n footer=FOOTER[0]\n if i == 0\n else FOOTER[1]\n if i == len(cfg.vis_cols) - 1\n else \"\",\n min_width=cfg.col_min_width,\n )\n\n for row_tasks in zip_longest(*status_dict.values()):\n table.add_row(*row_tasks)\n\n return table"
},
{
"identifier": "input_ask_for_action",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_for_action():\n console.print(\n \"[yellow]Whats up!?[/], how can I help you being productive today :rocket:?\"\n )\n console.print(\n \"\\t[1] :clipboard: [green]Create new Task[/]\"\n + 2 * \"\\t\"\n + \"[2] :clockwise_vertical_arrows: [bold cornflower_blue]Update/Check Task[/]\"\n )\n console.print(\n \"\\t[3] :bookmark_tabs: [bold yellow]Change Kanban Board[/]\"\n + \"\\t\"\n + \"[4] :magnifying_glass_tilted_left: [bold blue]Show Task Details[/]\"\n )\n console.print(\n \"\\t[5] :cross_mark: [red]Delete Kanban Board[/]\"\n + \"\\t\"\n + \"[6] :hammer_and_wrench: [grey69]Show Current Settings[/]\"\n )\n action = IntPrompt.ask(\n prompt=\"Choose wisely :books:\",\n choices=[\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n ],\n show_choices=False,\n )\n return action"
},
{
"identifier": "input_ask_for_action_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_for_action_settings() -> int:\n console.print(\n \"[yellow]Not happy with current settings!?[/],\"\n + \"which [blue]Section[/] do you want to change :hammer_and_wrench:?\"\n )\n console.print(\n \"\\t[1] :clipboard: [blue]settings.general[/]\"\n + 2 * \"\\t\"\n + \"[2] :eye: [blue]settings.columns.visibility[/]\"\n )\n console.print(\n \"\\t[3] :magnifying_glass_tilted_left: [blue]settings.scanner[/]\"\n + 2 * \"\\t\"\n + \"[4] :cross_mark: [red]Go back to Kanban Board[/]\"\n )\n action = IntPrompt.ask(\n prompt=\"Choose [blue]Section[/], where you want to change the Current Value\",\n choices=[\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n ],\n show_choices=False,\n )\n return action"
},
{
"identifier": "input_ask_for_change_board",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_for_change_board(boards_dict: dict) -> str:\n boards = cfg.kanban_boards\n max_board_len = max([len(b) for b in cfg.kanban_boards])\n\n # if active Board is not in Board List dont show default\n try:\n active_board_idx = boards.index(cfg.active_board) + 1\n except ValueError:\n active_board_idx = None\n\n for idx, (board, board_data) in enumerate(boards_dict.items(), start=1):\n status_dict = create_status_dict_for_rows(board_data, cfg.vis_cols)\n days_left_list = [\n calculate_days_left_till_due(val[\"Due_Date\"])\n for val in board_data.values()\n if (val.get(\"Due_Date\") and (val[\"Status\"] in [\"Ready\", \"Doing\"]))\n ]\n # Use -9999 to as placeholder for no tasks to make comparison later\n days_left = min(days_left_list) if days_left_list else -9999\n console.print(\n f\"[{idx}] {board}\"\n + \" \" * ((max_board_len - len(board) + 1))\n + \" | \".join(\n [\n f\"{COLOR_DICT[col]}: {len(status_dict[col]):02d}\"\n for col in cfg.vis_cols\n ]\n )\n + (\n f\"\\t next due in {days_left} day/s\"\n if days_left > 0\n else f\"[red]\\t task {-days_left} day/s overdue[/]\"\n if days_left != -9999\n else \"\\t no dues present here\"\n )\n )\n\n answer = IntPrompt.ask(\n prompt=\"Which board to activate\",\n choices=[f\"{i}\" for i, _ in enumerate(boards, start=1)],\n show_choices=False,\n default=active_board_idx,\n show_default=True,\n )\n return boards[int(answer) - 1]"
},
{
"identifier": "input_ask_for_delete_board",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_for_delete_board() -> str:\n boards = [b for b in cfg.kanban_boards]\n for idx, board in enumerate(boards, start=1):\n console.print(f\"[{idx}] {board}\")\n\n answer = IntPrompt.ask(\n prompt=\"Which board to delete\",\n choices=[f\"{i}\" for i, _ in enumerate(boards, start=1)],\n show_choices=False,\n )\n return boards[int(answer) - 1]"
},
{
"identifier": "input_ask_for_new_board_name",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_for_new_board_name() -> str:\n return Prompt.ask(\n prompt=\"A new folder will be created for your board\\n\"\n + \":warning: [yellow]Only[/] use alpha-numeric characters or\"\n + \" [green]'-', '_', ' '[/] for new board names.\\n\"\n + \"What should the new board be called?\"\n )"
},
{
"identifier": "input_ask_which_task_to_update",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_which_task_to_update(data: dict) -> str:\n choice_task_ids = [\n id for id, task in data.items() if task[\"Status\"] in cfg.vis_cols\n ]\n task_id_to_update = IntPrompt.ask(\n prompt=\"Which Task to update? Select an [[cyan]Id[/]]\",\n choices=choice_task_ids,\n show_choices=False,\n )\n return str(task_id_to_update)"
},
{
"identifier": "input_ask_which_tasks_to_show",
"path": "src/kanban_python/interface.py",
"snippet": "def input_ask_which_tasks_to_show(choices):\n return Prompt.ask(\n prompt=\"What Task/s to show? Select an [[cyan]Id[/]] or ([orange3]Tag[/])?\",\n default=False,\n show_default=False,\n choices=choices,\n show_choices=False,\n )"
},
{
"identifier": "input_change_column_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_column_settings():\n updated_column_dict = {}\n for col, vis in cfg.kanban_columns_dict.items():\n new_visible = Confirm.ask(\n prompt=f\"Should Column {COLOR_DICT.get(col,col)} be visible?\",\n default=True if vis == \"True\" else False,\n show_default=True,\n )\n updated_column_dict[col] = \"True\" if new_visible else \"False\"\n\n return updated_column_dict"
},
{
"identifier": "input_change_done_limit_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_done_limit_settings() -> int:\n done_limit = IntPrompt.ask(\n prompt=f\"What should the Limit of Tasks in {COLOR_DICT.get('Done','Done')} \"\n + f\"Column be, before moving to {COLOR_DICT.get('Archived','Archived')}?\",\n default=cfg.done_limit,\n show_default=True,\n )\n\n return str(done_limit)"
},
{
"identifier": "input_change_files_to_scan_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_files_to_scan_settings():\n files_to_scan = Prompt.ask(\n prompt=\"Which Files to scan? Enter [green]' '[/] separated File Endings\",\n default=\" \".join(cfg.scanned_files),\n show_default=True,\n )\n\n return files_to_scan"
},
{
"identifier": "input_change_footer_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_footer_settings():\n footer_visible = Confirm.ask(\n prompt=\"Should Footer be visible?\",\n default=True if cfg.show_footer == \"True\" else False,\n show_default=True,\n )\n\n return footer_visible"
},
{
"identifier": "input_change_min_col_width_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_min_col_width_settings():\n new_min_col_width = IntPrompt.ask(\n prompt=\"What should the minimum Column Width be?\",\n default=cfg.col_min_width,\n show_default=True,\n )\n\n return new_min_col_width"
},
{
"identifier": "input_change_patterns_to_scan_settings",
"path": "src/kanban_python/interface.py",
"snippet": "def input_change_patterns_to_scan_settings():\n files_to_scan = Prompt.ask(\n prompt=\"Which Patterns to scan? Enter [green]','[/] separated Patterns\",\n default=\",\".join(cfg.scanned_patterns),\n show_default=True,\n )\n\n return files_to_scan"
},
{
"identifier": "input_confirm_add_todos_to_board",
"path": "src/kanban_python/interface.py",
"snippet": "def input_confirm_add_todos_to_board(todos: list) -> bool:\n # Question Also print tasks already in Board?\n console.print(f\"Found [blue]{len(todos)}[/] TODOs.\")\n if len(todos) > 10:\n if input_confirm_show_all_todos():\n print_all_todos(todos)\n else:\n print_all_todos(todos)\n\n return Confirm.ask(\n prompt=\"Add found Tasks to active board?\", default=False, show_default=True\n )"
},
{
"identifier": "input_confirm_delete_board",
"path": "src/kanban_python/interface.py",
"snippet": "def input_confirm_delete_board(name) -> bool:\n return Confirm.ask(\n f\"Are you sure you want to delete the Board '{name}':question_mark:\"\n )"
},
{
"identifier": "input_confirm_set_board_active",
"path": "src/kanban_python/interface.py",
"snippet": "def input_confirm_set_board_active(name) -> bool:\n return Confirm.ask(\n f\"Do you want to set the Board '{name}' as active:question_mark:\"\n )"
},
{
"identifier": "input_create_new_task",
"path": "src/kanban_python/interface.py",
"snippet": "def input_create_new_task() -> dict:\n title = Prompt.ask(\n prompt=\"[1/5] Add Task Title\",\n )\n\n description = Prompt.ask(\n prompt=\"[2/5] Add Task Description\",\n show_default=True,\n default=\"\",\n )\n\n tag = Prompt.ask(\n prompt=\"[3/5] Add a Tag\",\n show_default=True,\n default=\"ETC\",\n )\n\n while True:\n due_date = Prompt.ask(\n prompt=\"[4/5] Add a Due Date (YYYY-MM-DD)\",\n show_default=True,\n default=\"\",\n )\n if not due_date or check_due_date_format(date_str=due_date):\n break\n else:\n console.print(\n f\":warning: '{due_date}' has [red]not[/] \"\n + \"the right format YYYY-MM-DD\"\n )\n\n console.print(f\"\\t[1] {COLOR_DICT['Ready']}\")\n console.print(f\"\\t[2] {COLOR_DICT['Doing']}\")\n\n status = IntPrompt.ask(\n prompt=\"[5/5] Status of Task\",\n show_choices=False,\n choices=[\"1\", \"2\"],\n show_default=True,\n default=\"1\",\n )\n\n new_task = {\n \"Title\": title,\n \"Description\": description,\n \"Status\": \"Ready\" if str(status) == \"1\" else \"Doing\",\n \"Tag\": tag.upper(),\n \"Creation_Date\": current_time_to_str(),\n \"Due_Date\": due_date_date_to_datetime(due_date),\n \"Begin_Time\": current_time_to_str() if str(status) == \"2\" else \"\",\n \"Complete_Time\": \"\",\n \"Duration\": 0,\n }\n return new_task"
},
{
"identifier": "input_update_task",
"path": "src/kanban_python/interface.py",
"snippet": "def input_update_task(current_task: dict) -> dict:\n title = input_update_task_title(current_task[\"Title\"])\n description = input_update_task_description(current_task[\"Description\"])\n tag = input_update_task_tag(current_task[\"Tag\"])\n due_date = input_update_due_date(current_task.get(\"Due_Date\", \"\"))\n status = input_ask_to_what_status_to_move(current_task[\"Title\"])\n\n if (status == \"Doing\") and (current_task[\"Status\"] != \"Doing\"):\n start_doing = current_time_to_str()\n stop_doing = current_task.get(\"Complete_Time\", \"\")\n duration = current_task.get(\"Duration\", 0)\n elif (status != \"Doing\") and (current_task[\"Status\"] == \"Doing\"):\n start_doing = current_task.get(\"Begin_Time\", \"\")\n stop_doing = current_time_to_str()\n duration = calculate_time_delta_str(\n start_time_str=current_task.get(\"Begin_Time\", \"\"), end_time_str=stop_doing\n ) + current_task.get(\"Duration\", 0)\n else:\n start_doing = current_task.get(\"Begin_Time\", \"\")\n stop_doing = current_task.get(\"Complete_Time\", \"\")\n duration = current_task.get(\"Duration\", 0)\n\n if status == \"Done\":\n stop_doing = current_time_to_str()\n console.print(\n f\":sparkle: Congrats, you just completed '{title}'\"\n + f\" after {duration} minutes :muscle:\"\n )\n\n updated_task = {\n \"Title\": title,\n \"Description\": description,\n \"Status\": status,\n \"Tag\": tag.upper(),\n \"Due_Date\": due_date,\n \"Begin_Time\": start_doing,\n \"Complete_Time\": stop_doing,\n \"Duration\": duration,\n }\n current_task.update(updated_task)\n return current_task"
},
{
"identifier": "check_board_name_valid",
"path": "src/kanban_python/utils.py",
"snippet": "def get_motivational_quote() -> str:\ndef current_time_to_str() -> str:\ndef calculate_time_delta_str(start_time_str: str, end_time_str: str) -> float:\ndef create_status_dict_for_rows(data: dict, vis_cols: list) -> dict:\ndef check_if_done_col_leq_X(cfg, data: dict) -> bool:\ndef check_if_there_are_visible_tasks_in_board(data: dict, vis_cols: list) -> bool:\ndef move_first_done_task_to_archive(data: dict):\ndef delete_json_file(db_path: str) -> None:\ndef check_board_name_valid(boardname: str):\ndef scan_files(path=Path.cwd(), endings: list = [\".py\"]):\n def recursive_search(path, file_list: list, progress):\ndef scan_for_todos(\n file_paths: list, rel_path=Path.cwd(), patterns: list = [\"#TODO\", \"# TODO\"]\n) -> list:\ndef split_todo_in_tag_and_title(todo: str, patterns: list):\ndef get_tag_id_choices(data_dict: dict, vis_cols: list) -> list:\ndef check_scanner_files_valid(files: str) -> bool:\ndef check_scanner_patterns_valid(patterns: str) -> bool:\ndef get_iso_calender_info(date_str: str):\ndef create_dict_for_report_view(completed_tasks: list):\ndef create_color_mapping(amount_list: list, max_val: int):\ndef create_report_document(boards_dict: dict):\ndef check_due_date_format(date_str: str) -> bool:\ndef due_date_datetime_to_date(date_datetime: str) -> str:\ndef due_date_date_to_datetime(date_str: str) -> str:\ndef calculate_days_left_till_due(due_date: str):"
}
] | from json import dump, load
from rich.pretty import pprint
from .config import (
cfg,
check_if_board_name_exists_in_config,
check_if_current_active_board_in_board_list,
delete_board_from_config,
get_json_path,
)
from .constants import (
DUMMY_DB,
KANBAN_BOARDS_PATH,
REPORT_FILE_NAME,
REPORT_FILE_PATH,
TASK_FILE_NAME,
)
from .interface import (
create_config_table,
create_github_like_report_table,
create_table,
input_ask_for_action,
input_ask_for_action_settings,
input_ask_for_change_board,
input_ask_for_delete_board,
input_ask_for_new_board_name,
input_ask_which_task_to_update,
input_ask_which_tasks_to_show,
input_change_column_settings,
input_change_done_limit_settings,
input_change_files_to_scan_settings,
input_change_footer_settings,
input_change_min_col_width_settings,
input_change_patterns_to_scan_settings,
input_confirm_add_todos_to_board,
input_confirm_delete_board,
input_confirm_set_board_active,
input_create_new_task,
input_update_task,
)
from .utils import (
check_board_name_valid,
check_if_done_col_leq_X,
check_if_there_are_visible_tasks_in_board,
check_scanner_files_valid,
check_scanner_patterns_valid,
console,
create_report_document,
current_time_to_str,
delete_json_file,
get_tag_id_choices,
move_first_done_task_to_archive,
scan_files,
scan_for_todos,
split_todo_in_tag_and_title,
) | 7,253 | if input_confirm_delete_board(board_to_delete):
board_to_delete_path = cfg.kanban_boards_dict[board_to_delete]
delete_json_file(board_to_delete_path)
delete_board_from_config(board_to_delete)
def show():
if not cfg.kanban_boards:
console.print(":warning: [red]No Boards created yet[/]:warning:")
console.print("Use 'kanban init' to create a new kanban board.")
raise KeyboardInterrupt
if not check_if_current_active_board_in_board_list():
console.print(
"[yellow]Hmm, Something went wrong.[/] "
+ f"The active board '{cfg.active_board}' is not in the list of boards."
)
change_kanban_board()
show()
return
db_data = read_db()
table = create_table(data=db_data)
console.print(table)
# Scan Functionality
#####################################################################################
def add_todos_to_board():
files = scan_files(endings=cfg.scanned_files)
todos = scan_for_todos(file_paths=files, patterns=cfg.scanned_patterns)
if not todos:
console.print(
":cross_mark: [red]Nothing found that "
+ "matches any of your provided patterns.[/]"
)
return
# TODO Write Docs for kanban scan functionality
# BUG This pattern also works
if input_confirm_add_todos_to_board(todos=todos):
todo_task_list = []
for task, file in todos:
tag, title = split_todo_in_tag_and_title(task, cfg.scanned_patterns)
new_task = {
"Title": title,
"Description": f"from {file}",
"Status": "Ready",
"Tag": tag,
"Creation_Date": current_time_to_str(),
"Begin_Time": "",
"Complete_Time": "",
"Duration": 0,
}
todo_task_list.append(new_task)
add_tasks_to_db(tasks=todo_task_list)
# Config Settings
#####################################################################################
def change_settings():
while True:
show_settings()
settings_selection = input_ask_for_action_settings()
if settings_selection == 1:
change_kanban_board()
new_min_col_widths = input_change_min_col_width_settings()
cfg.col_min_width = new_min_col_widths
done_limit = input_change_done_limit_settings()
cfg.done_limit = done_limit
footer_visible = input_change_footer_settings()
cfg.show_footer = "True" if footer_visible else "False"
if settings_selection == 2:
updated_col_config = input_change_column_settings()
cfg.kanban_columns_dict = updated_col_config
if settings_selection == 3:
while True:
new_files_to_scan = input_change_files_to_scan_settings()
if check_scanner_files_valid(new_files_to_scan):
cfg.scanned_files = new_files_to_scan
break
console.print(
f":warning: '{new_files_to_scan}' is [red]not[/] a valid."
)
while True:
new_patterns_to_scan = input_change_patterns_to_scan_settings()
if check_scanner_patterns_valid(new_patterns_to_scan):
cfg.scanned_patterns = new_patterns_to_scan
break
console.print(
f":warning: '{new_patterns_to_scan}' is [red]not[/] a valid."
)
if settings_selection == 4:
break
def show_settings():
settings_table = create_config_table()
console.print(settings_table)
# Report Creation
#####################################################################################
def create_report():
boards_dict = read_db("all")
gh_table = create_github_like_report_table(boards_dict)
console.print(gh_table)
if not REPORT_FILE_PATH.exists():
REPORT_FILE_PATH.mkdir(exist_ok=True)
create_report_document(boards_dict=boards_dict)
console.print(
"\n[bright_black]You can find your markdown report under:"
| from __future__ import annotations
# DB Controls
#####################################################################################
def create_new_db() -> None:
while True:
while True:
new_board_name = input_ask_for_new_board_name()
if check_board_name_valid(new_board_name):
break
console.print(f":warning: '{new_board_name}' is [red]not[/] a valid Name.")
if not check_if_board_name_exists_in_config(new_board_name):
break
console.print(
f":warning: Board '{new_board_name}' already exists, choose another Name."
)
cfg.kanban_boards_dict = new_board_name
# Options:
# 1. ~/.kanban-python/<BOARDNAME>.json
# 2. ~/.kanban-python/kanban_boards/<BOARDNAME>.json
# 3. ~/.kanban-python/kanban_boards/<BOARDNAME>/pykanban.json <- THIS
# 4. ~/.kanban-python/kanban_boards/<BOARDNAME>/<BOARDNAME>.json
new_db_path = KANBAN_BOARDS_PATH / new_board_name
if not new_db_path.exists():
new_db_path.mkdir()
with open(get_json_path(new_board_name), "w", encoding="utf-8") as f:
dump(DUMMY_DB, f, ensure_ascii=False, indent=4)
console.print(
f"Created new [orange3]{TASK_FILE_NAME}[/] file at "
+ f"[orange3]{KANBAN_BOARDS_PATH / new_board_name}[/] to save tasks."
)
if input_confirm_set_board_active(name=new_board_name):
cfg.active_board = new_board_name
def save_db(data):
path = cfg.active_board_path
with open(path, "w", encoding="utf-8") as f:
dump(data, f, ensure_ascii=False, indent=4)
def add_tasks_to_db(tasks: dict | list[dict]) -> None:
db_data = read_db()
if isinstance(tasks, dict):
new_id = str(max(int(i) for i in db_data.keys()) + 1)
db_data[new_id] = tasks
else:
for task in tasks:
new_id = str(max(int(i) for i in db_data.keys()) + 1)
db_data[new_id] = task
save_db(data=db_data)
def read_db(path: str = None) -> dict:
if not path:
path = cfg.active_board_path
if path == "all":
board_dict = {
b: read_single_board(b_path) for b, b_path in cfg.kanban_boards_dict.items()
}
return board_dict
try:
data = read_single_board(path)
return data
except FileNotFoundError:
print(path)
console.print(f":warning: No [orange3]{TASK_FILE_NAME}[/] file here anymore.")
console.print("Please change to another board.")
change_kanban_board()
console.print(f"[red]Seems like the previous {TASK_FILE_NAME} file was deleted[/]")
console.print(f"Create new [orange3]{TASK_FILE_NAME}[/] file here.")
create_new_db()
return read_db()
def read_single_board(path):
with open(path, "r") as file:
data = load(file)
return data
# User Action Controls
#####################################################################################
# Get User Action
def get_user_action():
return input_ask_for_action()
# Action 1
def add_new_task_to_db():
new_task = input_create_new_task()
add_tasks_to_db(tasks=new_task)
# Action 2
def update_task_from_db():
db_data = read_db()
if not check_if_there_are_visible_tasks_in_board(db_data, cfg.vis_cols):
console.print(":cross_mark:[red]No Tasks available on this Kanban board[/]")
return
selected_id = input_ask_which_task_to_update(db_data)
updated_task = input_update_task(current_task=db_data[selected_id])
db_data[selected_id] = updated_task
while not check_if_done_col_leq_X(cfg=cfg, data=db_data):
first_task_id, archive_task = move_first_done_task_to_archive(data=db_data)
db_data[first_task_id] = archive_task
save_db(data=db_data)
# Action 3
def change_kanban_board():
boards_dict = read_db(path="all")
new_active_board = input_ask_for_change_board(boards_dict)
cfg.active_board = new_active_board
# Action 4
def show_tasks():
db_data = read_db()
choices = get_tag_id_choices(db_data, cfg.vis_cols)
selection_criteria = input_ask_which_tasks_to_show(choices)
for i, task in db_data.items():
if selection_criteria in [i, task["Tag"]]:
console.print(
20 * "[bold blue]#[/]" + f" Task {i} " + 20 * "[bold blue]#[/]"
)
pprint(
{
key: val
for key, val in task.items()
if key in ["Title", "Description", "Tag", "Status", "Due_Date"]
},
console=console,
expand_all=True,
)
# Action 5
def delete_kanban_board():
board_to_delete = input_ask_for_delete_board()
if input_confirm_delete_board(board_to_delete):
board_to_delete_path = cfg.kanban_boards_dict[board_to_delete]
delete_json_file(board_to_delete_path)
delete_board_from_config(board_to_delete)
def show():
if not cfg.kanban_boards:
console.print(":warning: [red]No Boards created yet[/]:warning:")
console.print("Use 'kanban init' to create a new kanban board.")
raise KeyboardInterrupt
if not check_if_current_active_board_in_board_list():
console.print(
"[yellow]Hmm, Something went wrong.[/] "
+ f"The active board '{cfg.active_board}' is not in the list of boards."
)
change_kanban_board()
show()
return
db_data = read_db()
table = create_table(data=db_data)
console.print(table)
# Scan Functionality
#####################################################################################
def add_todos_to_board():
files = scan_files(endings=cfg.scanned_files)
todos = scan_for_todos(file_paths=files, patterns=cfg.scanned_patterns)
if not todos:
console.print(
":cross_mark: [red]Nothing found that "
+ "matches any of your provided patterns.[/]"
)
return
# TODO Write Docs for kanban scan functionality
# BUG This pattern also works
if input_confirm_add_todos_to_board(todos=todos):
todo_task_list = []
for task, file in todos:
tag, title = split_todo_in_tag_and_title(task, cfg.scanned_patterns)
new_task = {
"Title": title,
"Description": f"from {file}",
"Status": "Ready",
"Tag": tag,
"Creation_Date": current_time_to_str(),
"Begin_Time": "",
"Complete_Time": "",
"Duration": 0,
}
todo_task_list.append(new_task)
add_tasks_to_db(tasks=todo_task_list)
# Config Settings
#####################################################################################
def change_settings():
while True:
show_settings()
settings_selection = input_ask_for_action_settings()
if settings_selection == 1:
change_kanban_board()
new_min_col_widths = input_change_min_col_width_settings()
cfg.col_min_width = new_min_col_widths
done_limit = input_change_done_limit_settings()
cfg.done_limit = done_limit
footer_visible = input_change_footer_settings()
cfg.show_footer = "True" if footer_visible else "False"
if settings_selection == 2:
updated_col_config = input_change_column_settings()
cfg.kanban_columns_dict = updated_col_config
if settings_selection == 3:
while True:
new_files_to_scan = input_change_files_to_scan_settings()
if check_scanner_files_valid(new_files_to_scan):
cfg.scanned_files = new_files_to_scan
break
console.print(
f":warning: '{new_files_to_scan}' is [red]not[/] a valid."
)
while True:
new_patterns_to_scan = input_change_patterns_to_scan_settings()
if check_scanner_patterns_valid(new_patterns_to_scan):
cfg.scanned_patterns = new_patterns_to_scan
break
console.print(
f":warning: '{new_patterns_to_scan}' is [red]not[/] a valid."
)
if settings_selection == 4:
break
def show_settings():
settings_table = create_config_table()
console.print(settings_table)
# Report Creation
#####################################################################################
def create_report():
boards_dict = read_db("all")
gh_table = create_github_like_report_table(boards_dict)
console.print(gh_table)
if not REPORT_FILE_PATH.exists():
REPORT_FILE_PATH.mkdir(exist_ok=True)
create_report_document(boards_dict=boards_dict)
console.print(
"\n[bright_black]You can find your markdown report under:" | + f"\n[bold green]{REPORT_FILE_PATH/REPORT_FILE_NAME}" | 3 | 2023-11-11 14:43:55+00:00 | 12k |
AMAAI-Lab/mustango | audioldm/clap/open_clip/factory.py | [
{
"identifier": "CLAP",
"path": "audioldm/clap/open_clip/model.py",
"snippet": "class CLAP(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n audio_cfg: CLAPAudioCfp,\n text_cfg: CLAPTextCfg,\n quick_gelu: bool = False,\n enable_fusion: bool = False,\n fusion_type: str = \"None\",\n joint_embed_shape: int = 512,\n mlp_act: str = \"relu\",\n ):\n super().__init__()\n if isinstance(audio_cfg, dict):\n audio_cfg = CLAPAudioCfp(**audio_cfg)\n if isinstance(text_cfg, dict):\n text_cfg = CLAPTextCfg(**text_cfg)\n\n self.audio_cfg = audio_cfg\n self.text_cfg = text_cfg\n self.enable_fusion = enable_fusion\n self.fusion_type = fusion_type\n self.joint_embed_shape = joint_embed_shape\n self.mlp_act = mlp_act\n\n self.context_length = text_cfg.context_length\n\n # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more\n # memory efficient in recent PyTorch releases (>= 1.10).\n # NOTE: timm models always use native GELU regardless of quick_gelu flag.\n act_layer = QuickGELU if quick_gelu else nn.GELU\n\n if mlp_act == \"relu\":\n mlp_act_layer = nn.ReLU()\n elif mlp_act == \"gelu\":\n mlp_act_layer = nn.GELU()\n else:\n raise NotImplementedError\n\n # audio branch\n # audio branch parameters\n if audio_cfg.model_type == \"PANN\":\n self.audio_branch = create_pann_model(audio_cfg, enable_fusion, fusion_type)\n elif audio_cfg.model_type == \"HTSAT\":\n self.audio_branch = create_htsat_model(\n audio_cfg, enable_fusion, fusion_type\n )\n else:\n logging.error(f\"Model config for {audio_cfg.model_type} not found\")\n raise RuntimeError(f\"Model config for {audio_cfg.model_type} not found.\")\n\n # text branch\n # text branch parameters\n if text_cfg.model_type == \"transformer\":\n self.text_branch = Transformer(\n width=text_cfg.width,\n layers=text_cfg.layers,\n heads=text_cfg.heads,\n act_layer=act_layer,\n )\n self.vocab_size = text_cfg.vocab_size\n self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)\n self.positional_embedding = nn.Parameter(\n torch.empty(self.context_length, text_cfg.width)\n )\n self.ln_final = LayerNorm(text_cfg.width)\n self.text_transform = MLPLayers(\n units=[\n self.joint_embed_shape,\n self.joint_embed_shape,\n self.joint_embed_shape,\n ],\n dropout=0.1,\n )\n self.text_projection = nn.Sequential(\n nn.Linear(text_cfg.width, self.joint_embed_shape),\n mlp_act_layer,\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape),\n )\n elif text_cfg.model_type == \"bert\":\n self.text_branch = BertModel.from_pretrained(\"bert-base-uncased\")\n self.text_transform = MLPLayers(\n units=[\n self.joint_embed_shape,\n self.joint_embed_shape,\n self.joint_embed_shape,\n ],\n dropout=0.1,\n )\n self.text_projection = nn.Sequential(\n nn.Linear(768, self.joint_embed_shape),\n mlp_act_layer,\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape),\n )\n elif text_cfg.model_type == \"roberta\":\n self.text_branch = RobertaModel.from_pretrained(\"roberta-base\")\n self.text_transform = MLPLayers(\n units=[\n self.joint_embed_shape,\n self.joint_embed_shape,\n self.joint_embed_shape,\n ],\n dropout=0.1,\n )\n self.text_projection = nn.Sequential(\n nn.Linear(768, self.joint_embed_shape),\n mlp_act_layer,\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape),\n )\n elif text_cfg.model_type == \"bart\":\n self.text_branch = BartModel.from_pretrained(\"facebook/bart-base\")\n self.text_transform = MLPLayers(\n units=[\n self.joint_embed_shape,\n self.joint_embed_shape,\n self.joint_embed_shape,\n ],\n dropout=0.1,\n )\n self.text_projection = nn.Sequential(\n nn.Linear(768, 
self.joint_embed_shape),\n mlp_act_layer,\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape),\n )\n else:\n logging.error(f\"Model config for {text_cfg.model_type} not found\")\n raise RuntimeError(f\"Model config for {text_cfg.model_type} not found.\")\n self.text_branch_type = text_cfg.model_type\n # text branch parameters\n\n # audio branch parameters\n self.audio_transform = MLPLayers(\n units=[\n self.joint_embed_shape,\n self.joint_embed_shape,\n self.joint_embed_shape,\n ],\n dropout=0.1,\n )\n\n # below here is text branch parameters\n\n # ============================================================================================================\n self.audio_projection = nn.Sequential(\n nn.Linear(embed_dim, self.joint_embed_shape),\n mlp_act_layer,\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape),\n )\n\n self.logit_scale_a = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.logit_scale_t = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.register_buffer(\"attn_mask\", self.build_attention_mask(), persistent=False)\n\n self.init_text_branch_parameters()\n\n def init_text_branch_parameters(self):\n if self.text_branch_type == \"transformer\":\n nn.init.normal_(self.token_embedding.weight, std=0.02)\n nn.init.normal_(self.positional_embedding, std=0.01)\n proj_std = (self.text_branch.width**-0.5) * (\n (2 * self.text_branch.layers) ** -0.5\n )\n attn_std = self.text_branch.width**-0.5\n fc_std = (2 * self.text_branch.width) ** -0.5\n for block in self.text_branch.resblocks:\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n if self.text_branch_type == \"bert\" or self.text_branch_type == \"roberta\":\n width = self.text_branch.embeddings.word_embeddings.weight.shape[-1]\n elif self.text_branch_type == \"bart\":\n width = self.text_branch.shared.weight.shape[-1]\n else:\n width = self.text_branch.width\n nn.init.constant_(self.logit_scale_a, np.log(1 / 0.07))\n nn.init.constant_(self.logit_scale_t, np.log(1 / 0.07))\n\n # deprecated\n # if hasattr(self.visual, 'init_parameters'):\n # self.visual.init_parameters()\n\n # if self.text_projection is not None:\n # nn.init.normal_(self.text_projection, std=width**-0.5)\n\n def build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the vision tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.context_length, self.context_length)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n def encode_audio(self, audio, device):\n return self.audio_branch(\n audio, mixup_lambda=None, device=device\n ) # mix lambda needs to add\n\n # def list_of_dict_of_tensor2dict_of_tensor(self, x, device):\n # tmp = {}\n # for k in x[0].keys():\n # tmp[k] = []\n # for i in range(len(x)):\n # tmp[k].append(x[i][k][:77])\n # for k in x[0].keys():\n # tmp[k] = torch.tensor(tmp[k]).to(device=device, non_blocking=True)\n # return tmp\n\n def encode_text(self, text, device):\n if self.text_branch_type == \"transformer\":\n text = text.to(device=device, non_blocking=True)\n x = self.token_embedding(text) # [batch_size, n_ctx, d_model]\n\n x = x + self.positional_embedding\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.text_branch(x, attn_mask=self.attn_mask)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x)\n\n # x.shape = 
[batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])\n elif self.text_branch_type == \"bert\":\n # text = self.list_of_dict_of_tensor2dict_of_tensor(text, device)\n # text = BatchEncoding(text)\n x = self.text_branch(\n input_ids=text[\"input_ids\"].to(device=device, non_blocking=True),\n attention_mask=text[\"attention_mask\"].to(\n device=device, non_blocking=True\n ),\n token_type_ids=text[\"token_type_ids\"].to(\n device=device, non_blocking=True\n ),\n )[\"pooler_output\"]\n x = self.text_projection(x)\n elif self.text_branch_type == \"roberta\":\n x = self.text_branch(\n input_ids=text[\"input_ids\"].to(device=device, non_blocking=True),\n attention_mask=text[\"attention_mask\"].to(\n device=device, non_blocking=True\n ),\n )[\"pooler_output\"]\n x = self.text_projection(x)\n elif self.text_branch_type == \"bart\":\n x = torch.mean(\n self.text_branch(\n input_ids=text[\"input_ids\"].to(device=device, non_blocking=True),\n attention_mask=text[\"attention_mask\"].to(\n device=device, non_blocking=True\n ),\n )[\"encoder_last_hidden_state\"],\n axis=1,\n )\n x = self.text_projection(x)\n else:\n logging.error(f\"Model type {self.text_branch_type} not found\")\n raise RuntimeError(f\"Model type {self.text_branch_type} not found.\")\n return x\n\n def forward(self, audio, text, device=None):\n \"\"\"Forward audio and text into the CLAP\n\n Parameters\n ----------\n audio: torch.Tensor (batch_size, audio_length)\n the time-domain audio input / the batch of mel_spec and longer list.\n text: torch.Tensor () // need to add\n the text token input\n \"\"\"\n if device is None:\n if audio is not None:\n device = audio.device\n elif text is not None:\n device = text.device\n if audio is None and text is None:\n # a hack to get the logit scale\n return self.logit_scale_a.exp(), self.logit_scale_t.exp()\n elif audio is None:\n return self.encode_text(text, device=device)\n elif text is None:\n return self.audio_projection(\n self.encode_audio(audio, device=device)[\"embedding\"]\n )\n audio_features = self.audio_projection(\n self.encode_audio(audio, device=device)[\"embedding\"]\n )\n audio_features = F.normalize(audio_features, dim=-1)\n\n text_features = self.encode_text(text, device=device)\n # print(\"text_features\", text_features)\n # print(\"text_features.shape\", text_features.shape)\n # print(\"text_features.type\", type(text_features))\n text_features = F.normalize(text_features, dim=-1)\n\n audio_features_mlp = self.audio_transform(audio_features)\n text_features_mlp = self.text_transform(text_features)\n # Four outputs: audio features (basic & MLP), text features (basic & MLP)\n return (\n audio_features,\n text_features,\n audio_features_mlp,\n text_features_mlp,\n self.logit_scale_a.exp(),\n self.logit_scale_t.exp(),\n )\n\n def get_logit_scale(self):\n return self.logit_scale_a.exp(), self.logit_scale_t.exp()\n\n def get_text_embedding(self, data):\n \"\"\"Get the text embedding from the model\n\n Parameters\n ----------\n data: torch.Tensor\n a tensor of text embedding\n\n Returns\n ----------\n text_embed: torch.Tensor\n a tensor of text_embeds (N, D)\n\n \"\"\"\n device = next(self.parameters()).device\n for k in data:\n data[k] = data[k].to(device)\n if len(data[k].size()) < 2:\n data[k] = data[k].unsqueeze(0)\n text_embeds = self.encode_text(data, device=device)\n text_embeds = F.normalize(text_embeds, dim=-1)\n\n return 
text_embeds\n\n def get_audio_embedding(self, data):\n \"\"\"Get the audio embedding from the model\n\n Parameters\n ----------\n data: a list of dict\n the audio input dict list from 'get_audio_feature' method\n\n Returns\n ----------\n audio_embed: torch.Tensor\n a tensor of audio_embeds (N, D)\n\n \"\"\"\n device = next(self.parameters()).device\n input_dict = {}\n keys = data[0].keys()\n for k in keys:\n input_dict[k] = torch.cat([d[k].unsqueeze(0) for d in data], dim=0).to(\n device\n )\n\n audio_embeds = self.audio_projection(\n self.encode_audio(input_dict, device=device)[\"embedding\"]\n )\n audio_embeds = F.normalize(audio_embeds, dim=-1)\n\n return audio_embeds\n\n def audio_infer(self, audio, hopsize=None, device=None):\n \"\"\"Forward one audio and produce the audio embedding\n\n Parameters\n ----------\n audio: (audio_length)\n the time-domain audio input, notice that it must be only one input\n hopsize: int\n the overlap hopsize as the sliding window\n\n Returns\n ----------\n output_dict: {\n key: [n, (embedding_shape)] if \"HTS-AT\"\n or\n key: [(embedding_shape)] if \"PANN\"\n }\n the list of key values of the audio branch\n\n \"\"\"\n\n assert not self.training, \"the inference mode must be run at eval stage\"\n output_dict = {}\n # PANN\n if self.audio_cfg.model_type == \"PANN\":\n audio_input = audio.unsqueeze(dim=0)\n output_dict[key] = self.encode_audio(audio_input, device=device)[\n key\n ].squeeze(dim=0)\n elif self.audio_cfg.model_type == \"HTSAT\":\n # repeat\n audio_len = len(audio)\n k = self.audio_cfg.clip_samples // audio_len\n if k > 1:\n audio = audio.repeat(k)\n audio_len = len(audio)\n\n if hopsize is None:\n hopsize = min(hopsize, audio_len)\n\n if audio_len > self.audio_cfg.clip_samples:\n audio_input = [\n audio[pos : pos + self.audio_cfg.clip_samples].clone()\n for pos in range(\n 0, audio_len - self.audio_cfg.clip_samples, hopsize\n )\n ]\n audio_input.append(audio[-self.audio_cfg.clip_samples :].clone())\n audio_input = torch.stack(audio_input)\n output_dict[key] = self.encode_audio(audio_input, device=device)[key]\n else:\n audio_input = audio.unsqueeze(dim=0)\n output_dict[key] = self.encode_audio(audio_input, device=device)[\n key\n ].squeeze(dim=0)\n\n return output_dict"
},
{
"identifier": "convert_weights_to_fp16",
"path": "audioldm/clap/open_clip/model.py",
"snippet": "def convert_weights_to_fp16(model: nn.Module):\n \"\"\"Convert applicable model parameters to fp16\"\"\"\n\n def _convert_weights_to_fp16(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n l.weight.data = l.weight.data.half()\n if l.bias is not None:\n l.bias.data = l.bias.data.half()\n\n if isinstance(l, nn.MultiheadAttention):\n for attr in [\n *[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]],\n \"in_proj_bias\",\n \"bias_k\",\n \"bias_v\",\n ]:\n tensor = getattr(l, attr)\n if tensor is not None:\n tensor.data = tensor.data.half()\n\n for name in [\"text_projection\", \"proj\"]:\n if hasattr(l, name):\n attr = getattr(l, name)\n if attr is not None:\n attr.data = attr.data.half()\n\n model.apply(_convert_weights_to_fp16)"
},
{
"identifier": "load_openai_model",
"path": "audioldm/clap/open_clip/openai.py",
"snippet": "def load_openai_model(\n name: str,\n model_cfg,\n device: Union[str, torch.device] = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n jit=True,\n cache_dir=os.path.expanduser(f\"{CACHE_DIR}/clip\"),\n enable_fusion: bool = False,\n fusion_type: str = \"None\",\n):\n \"\"\"Load a CLIP model, preserve its text pretrained part, and set in the CLAP model\n\n Parameters\n ----------\n name : str\n A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict\n device : Union[str, torch.device]\n The device to put the loaded model\n jit : bool\n Whether to load the optimized JIT model (default) or more hackable non-JIT model.\n\n Returns\n -------\n model : torch.nn.Module\n The CLAP model\n preprocess : Callable[[PIL.Image], torch.Tensor]\n A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input\n \"\"\"\n if get_pretrained_url(name, \"openai\"):\n model_path = download_pretrained(\n get_pretrained_url(name, \"openai\"), root=cache_dir\n )\n elif os.path.isfile(name):\n model_path = name\n else:\n raise RuntimeError(\n f\"Model {name} not found; available models = {list_openai_models()}\"\n )\n\n try:\n # loading JIT archive\n model = torch.jit.load(model_path, map_location=device if jit else \"cpu\").eval()\n state_dict = None\n except RuntimeError:\n # loading saved state dict\n if jit:\n warnings.warn(\n f\"File {model_path} is not a JIT archive. Loading as a state dict instead\"\n )\n jit = False\n state_dict = torch.load(model_path, map_location=\"cpu\")\n\n if not jit:\n try:\n model = build_model_from_openai_state_dict(\n state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type\n ).to(device)\n except KeyError:\n sd = {k[7:]: v for k, v in state_dict[\"state_dict\"].items()}\n model = build_model_from_openai_state_dict(\n sd, model_cfg, enable_fusion, fusion_type\n ).to(device)\n\n if str(device) == \"cpu\":\n model.float()\n return model\n\n # patch the device names\n device_holder = torch.jit.trace(\n lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]\n )\n device_node = [\n n\n for n in device_holder.graph.findAllNodes(\"prim::Constant\")\n if \"Device\" in repr(n)\n ][-1]\n\n def patch_device(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"prim::Constant\"):\n if \"value\" in node.attributeNames() and str(node[\"value\"]).startswith(\n \"cuda\"\n ):\n node.copyAttributes(device_node)\n\n model.apply(patch_device)\n patch_device(model.encode_audio)\n patch_device(model.encode_text)\n\n # patch dtype to float32 on CPU\n if str(device) == \"cpu\":\n float_holder = torch.jit.trace(\n lambda: torch.ones([]).float(), example_inputs=[]\n )\n float_input = list(float_holder.graph.findNode(\"aten::to\").inputs())[1]\n float_node = float_input.node()\n\n def patch_float(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"aten::to\"):\n inputs = list(node.inputs())\n for i in [\n 1,\n 2,\n ]: # dtype can be the second or third argument to aten::to()\n if inputs[i].node()[\"value\"] == 5:\n inputs[i].node().copyAttributes(float_node)\n\n 
model.apply(patch_float)\n patch_float(model.encode_audio)\n patch_float(model.encode_text)\n model.float()\n\n model.audio_branch.audio_length = model.audio_cfg.audio_length\n return model"
},
{
"identifier": "get_pretrained_url",
"path": "audioldm/clap/open_clip/pretrained.py",
"snippet": "def get_pretrained_url(model: str, tag: str):\n if model not in _PRETRAINED:\n return \"\"\n model_pretrained = _PRETRAINED[model]\n if tag not in model_pretrained:\n return \"\"\n return model_pretrained[tag]"
},
{
"identifier": "download_pretrained",
"path": "audioldm/clap/open_clip/pretrained.py",
"snippet": "def download_pretrained(url: str, root: str = os.path.expanduser(f\"{CACHE_DIR}/clip\")):\n os.makedirs(root, exist_ok=True)\n filename = os.path.basename(url)\n\n if \"openaipublic\" in url:\n expected_sha256 = url.split(\"/\")[-2]\n else:\n expected_sha256 = \"\"\n\n download_target = os.path.join(root, filename)\n\n if os.path.exists(download_target) and not os.path.isfile(download_target):\n raise RuntimeError(f\"{download_target} exists and is not a regular file\")\n\n if os.path.isfile(download_target):\n if expected_sha256:\n if (\n hashlib.sha256(open(download_target, \"rb\").read()).hexdigest()\n == expected_sha256\n ):\n return download_target\n else:\n warnings.warn(\n f\"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file\"\n )\n else:\n return download_target\n\n with urllib.request.urlopen(url) as source, open(download_target, \"wb\") as output:\n with tqdm(\n total=int(source.info().get(\"Content-Length\")),\n ncols=80,\n unit=\"iB\",\n unit_scale=True,\n ) as loop:\n while True:\n buffer = source.read(8192)\n if not buffer:\n break\n\n output.write(buffer)\n loop.update(len(buffer))\n\n if (\n expected_sha256\n and hashlib.sha256(open(download_target, \"rb\").read()).hexdigest()\n != expected_sha256\n ):\n raise RuntimeError(\n f\"Model has been downloaded but the SHA256 checksum does not not match\"\n )\n\n return download_target"
},
{
"identifier": "image_transform",
"path": "audioldm/clap/open_clip/transform.py",
"snippet": "def image_transform(\n image_size: int,\n is_train: bool,\n mean=(0.48145466, 0.4578275, 0.40821073),\n std=(0.26862954, 0.26130258, 0.27577711),\n):\n normalize = Normalize(mean=mean, std=std)\n if is_train:\n return Compose(\n [\n RandomResizedCrop(\n image_size,\n scale=(0.9, 1.0),\n interpolation=InterpolationMode.BICUBIC,\n ),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ]\n )\n else:\n return Compose(\n [\n Resize(image_size, interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ]\n )"
}
] | import json
import logging
import os
import pathlib
import re
import torch
from copy import deepcopy
from pathlib import Path
from .model import CLAP, convert_weights_to_fp16
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform | 7,245 | global _MODEL_CONFIGS
config_ext = (".json",)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f"*{ext}"))
for cf in config_files:
if os.path.basename(cf)[0] == ".":
continue # Ignore hidden files
with open(cf, "r") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {
k: v
for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
if skip_params:
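# checkpoints saved with (Distributed)DataParallel prefix every key with "module."; strip that prefix so the weights load into a plain model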
if next(iter(state_dict.items()))[0].startswith("module"):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# for k in state_dict:
# if k.startswith('transformer'):
# v = state_dict.pop(k)
# state_dict['text_branch.' + k[12:]] = v
return state_dict
def create_model(
amodel_name: str,
tmodel_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
openai_model_cache_dir: str = os.path.expanduser(f"{CACHE_DIR}/clip"),
skip_params=True,
pretrained_audio: str = "",
pretrained_text: str = "",
enable_fusion: bool = False,
fusion_type: str = "None"
# pretrained_image: bool = False,
):
amodel_name = amodel_name.replace(
"/", "-"
) # for callers using old naming with / in ViT names
pretrained_orig = pretrained
pretrained = pretrained.lower()
if pretrained == "openai":
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
# Hard-code the text branch model type into the loaded config
model_cfg["text_cfg"]["model_type"] = tmodel_name
model = load_openai_model(
"ViT-B-16",
model_cfg,
device=device,
jit=jit,
cache_dir=openai_model_cache_dir,
enable_fusion=enable_fusion,
fusion_type=fusion_type,
)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
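# the OpenAI checkpoints ship fp16 weights; cast back to fp32 for full-precision or AMP training (see link above)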
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
# if pretrained_image:
# if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
# # pretrained weight loading for timm models set via vision_cfg
# model_cfg['vision_cfg']['timm_model_pretrained'] = True
# else:
# assert False, 'pretrained image towers currently only supported for timm models'
model_cfg["text_cfg"]["model_type"] = tmodel_name
model_cfg["enable_fusion"] = enable_fusion
model_cfg["fusion_type"] = fusion_type
model = CLAP(**model_cfg)
if pretrained:
checkpoint_path = ""
url = get_pretrained_url(amodel_name, pretrained)
if url:
|
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # dictionary (model_name: config) of model architecture configs
CACHE_DIR = os.getenv("AUDIOLDM_CACHE_DIR", "~/.cache/audioldm")
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = (".json",)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f"*{ext}"))
for cf in config_files:
if os.path.basename(cf)[0] == ".":
continue # Ignore hidden files
with open(cf, "r") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {
k: v
for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
if skip_params:
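# checkpoints saved with (Distributed)DataParallel prefix every key with "module."; strip that prefix so the weights load into a plain model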
if next(iter(state_dict.items()))[0].startswith("module"):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# for k in state_dict:
# if k.startswith('transformer'):
# v = state_dict.pop(k)
# state_dict['text_branch.' + k[12:]] = v
return state_dict
def create_model(
amodel_name: str,
tmodel_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
openai_model_cache_dir: str = os.path.expanduser(f"{CACHE_DIR}/clip"),
skip_params=True,
pretrained_audio: str = "",
pretrained_text: str = "",
enable_fusion: bool = False,
fusion_type: str = "None"
# pretrained_image: bool = False,
):
amodel_name = amodel_name.replace(
"/", "-"
) # for callers using old naming with / in ViT names
pretrained_orig = pretrained
pretrained = pretrained.lower()
if pretrained == "openai":
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
# Hard-code the text branch model type into the loaded config
model_cfg["text_cfg"]["model_type"] = tmodel_name
model = load_openai_model(
"ViT-B-16",
model_cfg,
device=device,
jit=jit,
cache_dir=openai_model_cache_dir,
enable_fusion=enable_fusion,
fusion_type=fusion_type,
)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
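# the OpenAI checkpoints ship fp16 weights; cast back to fp32 for full-precision or AMP training (see link above)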
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
# if pretrained_image:
# if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
# # pretrained weight loading for timm models set via vision_cfg
# model_cfg['vision_cfg']['timm_model_pretrained'] = True
# else:
# assert False, 'pretrained image towers currently only supported for timm models'
model_cfg["text_cfg"]["model_type"] = tmodel_name
model_cfg["enable_fusion"] = enable_fusion
model_cfg["fusion_type"] = fusion_type
model = CLAP(**model_cfg)
if pretrained:
checkpoint_path = ""
url = get_pretrained_url(amodel_name, pretrained)
if url: | checkpoint_path = download_pretrained(url, root=openai_model_cache_dir) | 4 | 2023-11-14 23:29:31+00:00 | 12k |
BraveGroup/Drive-WM | tests/pipelines/shap_e/test_shap_e_img2img.py | [
{
"identifier": "PipelineTesterMixin",
"path": "tests/pipelines/test_pipelines_common.py",
"snippet": "class PipelineTesterMixin:\n \"\"\"\n This mixin is designed to be used with unittest.TestCase classes.\n It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading the pipeline,\n equivalence of dict and tuple outputs, etc.\n \"\"\"\n\n # Canonical parameters that are passed to `__call__` regardless\n # of the type of pipeline. They are always optional and have common\n # sense default values.\n required_optional_params = frozenset(\n [\n \"num_inference_steps\",\n \"num_images_per_prompt\",\n \"generator\",\n \"latents\",\n \"output_type\",\n \"return_dict\",\n ]\n )\n\n # set these parameters to False in the child class if the pipeline does not support the corresponding functionality\n test_attention_slicing = True\n\n test_xformers_attention = True\n\n def get_generator(self, seed):\n device = torch_device if torch_device != \"mps\" else \"cpu\"\n generator = torch.Generator(device).manual_seed(seed)\n return generator\n\n @property\n def pipeline_class(self) -> Union[Callable, DiffusionPipeline]:\n raise NotImplementedError(\n \"You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n def get_dummy_components(self):\n raise NotImplementedError(\n \"You need to implement `get_dummy_components(self)` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n def get_dummy_inputs(self, device, seed=0):\n raise NotImplementedError(\n \"You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `params` in the child test class. \"\n \"`params` are checked for if all values are present in `__call__`'s signature.\"\n \" You can set `params` using one of the common set of parameters defined in `pipeline_params.py`\"\n \" e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to \"\n \"image pipelines, including prompts and prompt embedding overrides.\"\n \"If your pipeline's set of arguments has minor changes from one of the common sets of arguments, \"\n \"do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline \"\n \"with non-configurable height and width arguments should set the attribute as \"\n \"`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. \"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def batch_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `batch_params` in the child test class. \"\n \"`batch_params` are the parameters required to be batched when passed to the pipeline's \"\n \"`__call__` method. `pipeline_params.py` provides some common sets of parameters such as \"\n \"`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's \"\n \"set of batch arguments has minor changes from one of the common sets of batch arguments, \"\n \"do not make modifications to the existing common sets of batch arguments. I.e. a text to \"\n \"image pipeline `negative_prompt` is not batched should set the attribute as \"\n \"`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. 
\"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def callback_cfg_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `callback_cfg_params` in the child test class that requires to run test_callback_cfg. \"\n \"`callback_cfg_params` are the parameters that needs to be passed to the pipeline's callback \"\n \"function when dynamically adjusting `guidance_scale`. They are variables that require special\"\n \"treatment when `do_classifier_free_guidance` is `True`. `pipeline_params.py` provides some common\"\n \" sets of parameters such as `TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS`. If your pipeline's \"\n \"set of cfg arguments has minor changes from one of the common sets of cfg arguments, \"\n \"do not make modifications to the existing common sets of cfg arguments. I.e. for inpaint pipeine, you \"\n \" need to adjust batch size of `mask` and `masked_image_latents` so should set the attribute as\"\n \"`callback_cfg_params = TEXT_TO_IMAGE_CFG_PARAMS.union({'mask', 'masked_image_latents'})`\"\n )\n\n def tearDown(self):\n # clean up the VRAM after each test in case of CUDA runtime errors\n super().tearDown()\n gc.collect()\n torch.cuda.empty_cache()\n\n def test_save_load_local(self, expected_max_difference=5e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output = pipe(**inputs)[0]\n\n logger = logging.get_logger(\"diffusers.pipelines.pipeline_utils\")\n logger.setLevel(diffusers.logging.INFO)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir, safe_serialization=False)\n\n with CaptureLogger(logger) as cap_logger:\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n\n for name in pipe_loaded.components.keys():\n if name not in pipe_loaded._optional_components:\n assert name in str(cap_logger)\n\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n def test_pipeline_call_signature(self):\n self.assertTrue(\n hasattr(self.pipeline_class, \"__call__\"), f\"{self.pipeline_class} should have a `__call__` method\"\n )\n\n parameters = inspect.signature(self.pipeline_class.__call__).parameters\n\n optional_parameters = set()\n\n for k, v in parameters.items():\n if v.default != inspect._empty:\n optional_parameters.add(k)\n\n parameters = set(parameters.keys())\n parameters.remove(\"self\")\n parameters.discard(\"kwargs\") # kwargs can be added if arguments of pipeline call function are deprecated\n\n remaining_required_parameters = set()\n\n for param in self.params:\n if param not in parameters:\n remaining_required_parameters.add(param)\n\n self.assertTrue(\n len(remaining_required_parameters) == 0,\n f\"Required parameters not present: {remaining_required_parameters}\",\n )\n\n remaining_required_optional_parameters = set()\n\n for param in self.required_optional_params:\n if param not in optional_parameters:\n remaining_required_optional_parameters.add(param)\n\n self.assertTrue(\n len(remaining_required_optional_parameters) == 0,\n f\"Required optional 
parameters not present: {remaining_required_optional_parameters}\",\n )\n\n def test_inference_batch_consistent(self, batch_sizes=[2]):\n self._test_inference_batch_consistent(batch_sizes=batch_sizes)\n\n def _test_inference_batch_consistent(\n self, batch_sizes=[2], additional_params_copy_to_batched_inputs=[\"num_inference_steps\"]\n ):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n inputs[\"generator\"] = self.get_generator(0)\n\n logger = logging.get_logger(pipe.__module__)\n logger.setLevel(level=diffusers.logging.FATAL)\n\n # prepare batched inputs\n batched_inputs = []\n for batch_size in batch_sizes:\n batched_input = {}\n batched_input.update(inputs)\n\n for name in self.batch_params:\n if name not in inputs:\n continue\n\n value = inputs[name]\n if name == \"prompt\":\n len_prompt = len(value)\n # make unequal batch sizes\n batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]\n\n # make last batch super long\n batched_input[name][-1] = 100 * \"very long\"\n\n else:\n batched_input[name] = batch_size * [value]\n\n if \"generator\" in inputs:\n batched_input[\"generator\"] = [self.get_generator(i) for i in range(batch_size)]\n\n if \"batch_size\" in inputs:\n batched_input[\"batch_size\"] = batch_size\n\n batched_inputs.append(batched_input)\n\n logger.setLevel(level=diffusers.logging.WARNING)\n for batch_size, batched_input in zip(batch_sizes, batched_inputs):\n output = pipe(**batched_input)\n assert len(output[0]) == batch_size\n\n def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4):\n self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff)\n\n def _test_inference_batch_single_identical(\n self,\n batch_size=2,\n expected_max_diff=1e-4,\n additional_params_copy_to_batched_inputs=[\"num_inference_steps\"],\n ):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for components in pipe.components.values():\n if hasattr(components, \"set_default_attn_processor\"):\n components.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is has been used in self.get_dummy_inputs\n inputs[\"generator\"] = self.get_generator(0)\n\n logger = logging.get_logger(pipe.__module__)\n logger.setLevel(level=diffusers.logging.FATAL)\n\n # batchify inputs\n batched_inputs = {}\n batched_inputs.update(inputs)\n\n for name in self.batch_params:\n if name not in inputs:\n continue\n\n value = inputs[name]\n if name == \"prompt\":\n len_prompt = len(value)\n batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]\n batched_inputs[name][-1] = 100 * \"very long\"\n\n else:\n batched_inputs[name] = batch_size * [value]\n\n if \"generator\" in inputs:\n batched_inputs[\"generator\"] = [self.get_generator(i) for i in range(batch_size)]\n\n if \"batch_size\" in inputs:\n batched_inputs[\"batch_size\"] = batch_size\n\n for arg in additional_params_copy_to_batched_inputs:\n batched_inputs[arg] = inputs[arg]\n\n output = pipe(**inputs)\n output_batch = pipe(**batched_inputs)\n\n assert output_batch[0].shape[0] == batch_size\n\n max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()\n assert max_diff < expected_max_diff\n\n def 
test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n output = pipe(**self.get_dummy_inputs(generator_device))[0]\n output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n def test_components_function(self):\n init_components = self.get_dummy_components()\n init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))}\n\n pipe = self.pipeline_class(**init_components)\n\n self.assertTrue(hasattr(pipe, \"components\"))\n self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"float16 requires CUDA\")\n def test_float16_inference(self, expected_max_diff=5e-2):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n components = self.get_dummy_components()\n pipe_fp16 = self.pipeline_class(**components)\n for component in pipe_fp16.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe_fp16.to(torch_device, torch.float16)\n pipe_fp16.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is used inside dummy inputs\n if \"generator\" in inputs:\n inputs[\"generator\"] = self.get_generator(0)\n\n output = pipe(**inputs)[0]\n\n fp16_inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is used inside dummy inputs\n if \"generator\" in fp16_inputs:\n fp16_inputs[\"generator\"] = self.get_generator(0)\n\n output_fp16 = pipe_fp16(**fp16_inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()\n self.assertLess(max_diff, expected_max_diff, \"The outputs of the fp16 and fp32 pipelines are too different.\")\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"float16 requires CUDA\")\n def test_save_load_float16(self, expected_max_diff=1e-2):\n components = self.get_dummy_components()\n for name, module in components.items():\n if hasattr(module, \"half\"):\n components[name] = module.to(torch_device).half()\n\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for name, component in 
pipe_loaded.components.items():\n if hasattr(component, \"dtype\"):\n self.assertTrue(\n component.dtype == torch.float16,\n f\"`{name}.dtype` switched from `float16` to {component.dtype} after loading.\",\n )\n\n inputs = self.get_dummy_inputs(torch_device)\n output_loaded = pipe_loaded(**inputs)[0]\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(\n max_diff, expected_max_diff, \"The output of the fp16 pipeline changed after saving and loading.\"\n )\n\n def test_save_load_optional_components(self, expected_max_difference=1e-4):\n if not hasattr(self.pipeline_class, \"_optional_components\"):\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n # set all optional components to None\n for optional_component in pipe._optional_components:\n setattr(pipe, optional_component, None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir, safe_serialization=False)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for optional_component in pipe._optional_components:\n self.assertTrue(\n getattr(pipe_loaded, optional_component) is None,\n f\"`{optional_component}` did not stay set to None after loading.\",\n )\n\n inputs = self.get_dummy_inputs(generator_device)\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"CUDA and CPU are required to switch devices\")\n def test_to_device(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.set_progress_bar_config(disable=None)\n\n pipe.to(\"cpu\")\n model_devices = [component.device.type for component in components.values() if hasattr(component, \"device\")]\n self.assertTrue(all(device == \"cpu\" for device in model_devices))\n\n output_cpu = pipe(**self.get_dummy_inputs(\"cpu\"))[0]\n self.assertTrue(np.isnan(output_cpu).sum() == 0)\n\n pipe.to(\"cuda\")\n model_devices = [component.device.type for component in components.values() if hasattr(component, \"device\")]\n self.assertTrue(all(device == \"cuda\" for device in model_devices))\n\n output_cuda = pipe(**self.get_dummy_inputs(\"cuda\"))[0]\n self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)\n\n def test_to_dtype(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.set_progress_bar_config(disable=None)\n\n model_dtypes = [component.dtype for component in components.values() if hasattr(component, \"dtype\")]\n self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes))\n\n pipe.to(torch_dtype=torch.float16)\n model_dtypes = [component.dtype for component in components.values() if hasattr(component, \"dtype\")]\n self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))\n\n def test_attention_slicing_forward_pass(self, expected_max_diff=1e-3):\n 
self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff)\n\n def _test_attention_slicing_forward_pass(\n self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3\n ):\n if not self.test_attention_slicing:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output_without_slicing = pipe(**inputs)[0]\n\n pipe.enable_attention_slicing(slice_size=1)\n inputs = self.get_dummy_inputs(generator_device)\n output_with_slicing = pipe(**inputs)[0]\n\n if test_max_difference:\n max_diff = np.abs(to_np(output_with_slicing) - to_np(output_without_slicing)).max()\n self.assertLess(max_diff, expected_max_diff, \"Attention slicing should not affect the inference results\")\n\n if test_mean_pixel_difference:\n assert_mean_pixel_difference(to_np(output_with_slicing[0]), to_np(output_without_slicing[0]))\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_accelerate_available() or is_accelerate_version(\"<\", \"0.14.0\"),\n reason=\"CPU offload is only available with CUDA and `accelerate v0.14.0` or higher\",\n )\n def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output_without_offload = pipe(**inputs)[0]\n\n pipe.enable_sequential_cpu_offload()\n\n inputs = self.get_dummy_inputs(generator_device)\n output_with_offload = pipe(**inputs)[0]\n\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"CPU offloading should not affect the inference results\")\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_accelerate_available() or is_accelerate_version(\"<\", \"0.17.0\"),\n reason=\"CPU offload is only available with CUDA and `accelerate v0.17.0` or higher\",\n )\n def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):\n generator_device = \"cpu\"\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(generator_device)\n output_without_offload = pipe(**inputs)[0]\n\n pipe.enable_model_cpu_offload()\n inputs = self.get_dummy_inputs(generator_device)\n output_with_offload = pipe(**inputs)[0]\n\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"CPU offloading should not affect the inference results\")\n offloaded_modules = [\n v\n for k, v in pipe.components.items()\n if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload\n ]\n (\n self.assertTrue(all(v.device.type == \"cpu\" for v in offloaded_modules)),\n f\"Not offloaded: {[v 
for v in offloaded_modules if v.device.type != 'cpu']}\",\n )\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_xformers_available(),\n reason=\"XFormers attention is only available with CUDA and `xformers` installed\",\n )\n def test_xformers_attention_forwardGenerator_pass(self):\n self._test_xformers_attention_forwardGenerator_pass()\n\n def _test_xformers_attention_forwardGenerator_pass(\n self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4\n ):\n if not self.test_xformers_attention:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output_without_offload = pipe(**inputs)[0]\n output_without_offload = (\n output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload\n )\n\n pipe.enable_xformers_memory_efficient_attention()\n inputs = self.get_dummy_inputs(torch_device)\n output_with_offload = pipe(**inputs)[0]\n output_with_offload = (\n output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload\n )\n\n if test_max_difference:\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"XFormers attention should not affect the inference results\")\n\n if test_mean_pixel_difference:\n assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])\n\n def test_progress_bar(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n\n inputs = self.get_dummy_inputs(torch_device)\n with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):\n _ = pipe(**inputs)\n stderr = stderr.getvalue()\n # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,\n # so we just match \"5\" in \"#####| 1/5 [00:01<00:00]\"\n max_steps = re.search(\"/(.*?) 
\", stderr).group(1)\n self.assertTrue(max_steps is not None and len(max_steps) > 0)\n self.assertTrue(\n f\"{max_steps}/{max_steps}\" in stderr, \"Progress bar should be enabled and stopped at the max step\"\n )\n\n pipe.set_progress_bar_config(disable=True)\n with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):\n _ = pipe(**inputs)\n self.assertTrue(stderr.getvalue() == \"\", \"Progress bar should be disabled\")\n\n def test_num_images_per_prompt(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n\n if \"num_images_per_prompt\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n batch_sizes = [1, 2]\n num_images_per_prompts = [1, 2]\n\n for batch_size in batch_sizes:\n for num_images_per_prompt in num_images_per_prompts:\n inputs = self.get_dummy_inputs(torch_device)\n\n for key in inputs.keys():\n if key in self.batch_params:\n inputs[key] = batch_size * [inputs[key]]\n\n images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]\n\n assert images.shape[0] == batch_size * num_images_per_prompt\n\n def test_cfg(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n\n if \"guidance_scale\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n\n inputs[\"guidance_scale\"] = 1.0\n out_no_cfg = pipe(**inputs)[0]\n\n inputs[\"guidance_scale\"] = 7.5\n out_cfg = pipe(**inputs)[0]\n\n assert out_cfg.shape == out_no_cfg.shape\n\n def test_callback_inputs(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n has_callback_tensor_inputs = \"callback_on_step_end_tensor_inputs\" in sig.parameters\n has_callback_step_end = \"callback_on_step_end\" in sig.parameters\n\n if not (has_callback_tensor_inputs and has_callback_step_end):\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n self.assertTrue(\n hasattr(pipe, \"_callback_tensor_inputs\"),\n f\" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs\",\n )\n\n def callback_inputs_subset(pipe, i, t, callback_kwargs):\n # interate over callback args\n for tensor_name, tensor_value in callback_kwargs.items():\n # check that we're only passing in allowed tensor inputs\n assert tensor_name in pipe._callback_tensor_inputs\n\n return callback_kwargs\n\n def callback_inputs_all(pipe, i, t, callback_kwargs):\n for tensor_name in pipe._callback_tensor_inputs:\n assert tensor_name in callback_kwargs\n\n # interate over callback args\n for tensor_name, tensor_value in callback_kwargs.items():\n # check that we're only passing in allowed tensor inputs\n assert tensor_name in pipe._callback_tensor_inputs\n\n return callback_kwargs\n\n inputs = self.get_dummy_inputs(torch_device)\n\n # Test passing in a subset\n inputs[\"callback_on_step_end\"] = callback_inputs_subset\n inputs[\"callback_on_step_end_tensor_inputs\"] = [\"latents\"]\n inputs[\"output_type\"] = \"latent\"\n output = pipe(**inputs)[0]\n\n # Test passing in a everything\n inputs[\"callback_on_step_end\"] = callback_inputs_all\n inputs[\"callback_on_step_end_tensor_inputs\"] = 
pipe._callback_tensor_inputs\n inputs[\"output_type\"] = \"latent\"\n output = pipe(**inputs)[0]\n\n def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):\n is_last = i == (pipe.num_timesteps - 1)\n if is_last:\n callback_kwargs[\"latents\"] = torch.zeros_like(callback_kwargs[\"latents\"])\n return callback_kwargs\n\n inputs[\"callback_on_step_end\"] = callback_inputs_change_tensor\n inputs[\"callback_on_step_end_tensor_inputs\"] = pipe._callback_tensor_inputs\n inputs[\"output_type\"] = \"latent\"\n output = pipe(**inputs)[0]\n assert output.abs().sum() == 0\n\n def test_callback_cfg(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n has_callback_tensor_inputs = \"callback_on_step_end_tensor_inputs\" in sig.parameters\n has_callback_step_end = \"callback_on_step_end\" in sig.parameters\n\n if not (has_callback_tensor_inputs and has_callback_step_end):\n return\n\n if \"guidance_scale\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n self.assertTrue(\n hasattr(pipe, \"_callback_tensor_inputs\"),\n f\" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs\",\n )\n\n def callback_increase_guidance(pipe, i, t, callback_kwargs):\n pipe._guidance_scale += 1.0\n\n return callback_kwargs\n\n inputs = self.get_dummy_inputs(torch_device)\n\n # use cfg guidance because some pipelines modify the shape of the latents\n # outside of the denoising loop\n inputs[\"guidance_scale\"] = 2.0\n inputs[\"callback_on_step_end\"] = callback_increase_guidance\n inputs[\"callback_on_step_end_tensor_inputs\"] = pipe._callback_tensor_inputs\n _ = pipe(**inputs)[0]\n\n # we increase the guidance scale by 1.0 at every step\n # check that the guidance scale is increased by the number of scheduler timesteps\n # accounts for models that modify the number of inference steps based on strength\n assert pipe.guidance_scale == (inputs[\"guidance_scale\"] + pipe.num_timesteps)"
},
{
"identifier": "assert_mean_pixel_difference",
"path": "tests/pipelines/test_pipelines_common.py",
"snippet": "def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10):\n image = np.asarray(DiffusionPipeline.numpy_to_pil(image)[0], dtype=np.float32)\n expected_image = np.asarray(DiffusionPipeline.numpy_to_pil(expected_image)[0], dtype=np.float32)\n avg_diff = np.abs(image - expected_image).mean()\n assert avg_diff < expected_max_diff, f\"Error image deviates {avg_diff} pixels on average\""
}
] | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils.testing_utils import (
floats_tensor,
load_image,
load_numpy,
nightly,
require_torch_gpu,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference | 9,659 | "image_encoder": image_encoder,
"image_processor": image_processor,
"shap_e_renderer": shap_e_renderer,
"scheduler": scheduler,
}
return components
def get_dummy_inputs(self, device, seed=0):
input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
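# note: device-bound torch.Generator objects are not supported on MPS here, so the global RNG is seeded instead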
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "latent",
}
return inputs
def test_shap_e(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images[0]
image_slice = image[-3:, -3:].cpu().numpy()
assert image.shape == (32, 16)
expected_slice = np.array(
[-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_inference_batch_consistent(self):
# NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[2])
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
batch_size=2,
expected_max_diff=6e-3,
)
def test_num_images_per_prompt(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
batch_size = 1
num_images_per_prompt = 2
inputs = self.get_dummy_inputs(torch_device)
for key in inputs.keys():
if key in self.batch_params:
inputs[key] = batch_size * [inputs[key]]
images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=1e-1)
def test_save_load_local(self):
super().test_save_load_local(expected_max_difference=5e-3)
@unittest.skip("Key error is raised with accelerate")
def test_sequential_cpu_offload_forward_pass(self):
pass
@nightly
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_shap_e_img2img(self):
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy"
)
pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device=torch_device).manual_seed(0)
images = pipe(
input_image,
generator=generator,
guidance_scale=3.0,
num_inference_steps=64,
frame_size=64,
output_type="np",
).images[0]
assert images.shape == (20, 64, 64, 3)
| # Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = ShapEImg2ImgPipeline
params = ["image"]
batch_params = ["image"]
required_optional_params = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
test_xformers_attention = False
@property
def text_embedder_hidden_size(self):
return 16
@property
def time_input_dim(self):
return 16
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def renderer_dim(self):
return 8
@property
def dummy_image_encoder(self):
torch.manual_seed(0)
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size,
image_size=32,
projection_dim=self.text_embedder_hidden_size,
intermediate_size=24,
num_attention_heads=2,
num_channels=3,
num_hidden_layers=5,
patch_size=1,
)
model = CLIPVisionModel(config)
return model
@property
def dummy_image_processor(self):
image_processor = CLIPImageProcessor(
crop_size=224,
do_center_crop=True,
do_normalize=True,
do_resize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
resample=3,
size=224,
)
return image_processor
@property
def dummy_prior(self):
torch.manual_seed(0)
model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
model = PriorTransformer(**model_kwargs)
return model
@property
def dummy_renderer(self):
torch.manual_seed(0)
model_kwargs = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
model = ShapERenderer(**model_kwargs)
return model
def get_dummy_components(self):
prior = self.dummy_prior
image_encoder = self.dummy_image_encoder
image_processor = self.dummy_image_processor
shap_e_renderer = self.dummy_renderer
scheduler = HeunDiscreteScheduler(
beta_schedule="exp",
num_train_timesteps=1024,
prediction_type="sample",
use_karras_sigmas=True,
clip_sample=True,
clip_sample_range=1.0,
)
components = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"shap_e_renderer": shap_e_renderer,
"scheduler": scheduler,
}
return components
def get_dummy_inputs(self, device, seed=0):
input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
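# note: device-bound torch.Generator objects are not supported on MPS here, so the global RNG is seeded instead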
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "latent",
}
return inputs
def test_shap_e(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images[0]
image_slice = image[-3:, -3:].cpu().numpy()
assert image.shape == (32, 16)
expected_slice = np.array(
[-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_inference_batch_consistent(self):
# NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[2])
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
batch_size=2,
expected_max_diff=6e-3,
)
def test_num_images_per_prompt(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
batch_size = 1
num_images_per_prompt = 2
inputs = self.get_dummy_inputs(torch_device)
for key in inputs.keys():
if key in self.batch_params:
inputs[key] = batch_size * [inputs[key]]
images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=1e-1)
def test_save_load_local(self):
super().test_save_load_local(expected_max_difference=5e-3)
@unittest.skip("Key error is raised with accelerate")
def test_sequential_cpu_offload_forward_pass(self):
pass
@nightly
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_shap_e_img2img(self):
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy"
)
pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device=torch_device).manual_seed(0)
images = pipe(
input_image,
generator=generator,
guidance_scale=3.0,
num_inference_steps=64,
frame_size=64,
output_type="np",
).images[0]
assert images.shape == (20, 64, 64, 3)
| assert_mean_pixel_difference(images, expected_image) | 1 | 2023-11-18 01:40:55+00:00 | 12k |
basnijholt/unidep | unidep/_cli.py | [
{
"identifier": "create_conda_env_specification",
"path": "unidep/_conda_env.py",
"snippet": "def create_conda_env_specification( # noqa: PLR0912\n resolved: dict[str, dict[Platform | None, dict[CondaPip, Spec]]],\n channels: list[str],\n platforms: list[Platform],\n selector: Literal[\"sel\", \"comment\"] = \"sel\",\n) -> CondaEnvironmentSpec:\n \"\"\"Create a conda environment specification from resolved requirements.\"\"\"\n if selector not in (\"sel\", \"comment\"): # pragma: no cover\n msg = f\"Invalid selector: {selector}, must be one of ['sel', 'comment']\"\n raise ValueError(msg)\n\n # Split in conda and pip dependencies and prefer conda over pip\n conda, pip = _extract_conda_pip_dependencies(resolved)\n\n conda_deps: list[str | dict[str, str]] = CommentedSeq()\n pip_deps: list[str] = CommentedSeq()\n seen_identifiers: set[str] = set()\n for platform_to_spec in conda.values():\n if len(platform_to_spec) > 1 and selector == \"sel\":\n # None has been expanded already if len>1\n _resolve_multiple_platform_conflicts(platform_to_spec)\n for _platform, spec in sorted(platform_to_spec.items()):\n dep_str = spec.name_with_pin()\n if len(platforms) != 1 and _platform is not None:\n if selector == \"sel\":\n sel = _conda_sel(_platform)\n dep_str = {f\"sel({sel})\": dep_str} # type: ignore[assignment]\n conda_deps.append(dep_str)\n if selector == \"comment\":\n _add_comment(conda_deps, _platform)\n else:\n conda_deps.append(dep_str)\n assert isinstance(spec.identifier, str)\n seen_identifiers.add(spec.identifier)\n\n for platform_to_spec in pip.values():\n spec_to_platforms: dict[Spec, list[Platform | None]] = {}\n for _platform, spec in platform_to_spec.items():\n spec_to_platforms.setdefault(spec, []).append(_platform)\n\n for spec, _platforms in spec_to_platforms.items():\n if spec.identifier in seen_identifiers:\n continue\n\n dep_str = spec.name_with_pin(is_pip=True)\n if _platforms != [None] and len(platforms) != 1:\n if selector == \"sel\":\n marker = build_pep508_environment_marker(_platforms) # type: ignore[arg-type]\n dep_str = f\"{dep_str}; {marker}\"\n pip_deps.append(dep_str)\n else:\n assert selector == \"comment\"\n # We can only add comments with a single platform because\n # `conda-lock` doesn't implement logic, e.g., [linux or win]\n # should be spread into two lines, one with [linux] and the\n # other with [win].\n for _platform in _platforms:\n pip_deps.append(dep_str)\n _add_comment(pip_deps, cast(Platform, _platform))\n else:\n pip_deps.append(dep_str)\n\n return CondaEnvironmentSpec(channels, platforms, conda_deps, pip_deps)"
},
{
"identifier": "write_conda_environment_file",
"path": "unidep/_conda_env.py",
"snippet": "def write_conda_environment_file(\n env_spec: CondaEnvironmentSpec,\n output_file: str | Path | None = \"environment.yaml\",\n name: str = \"myenv\",\n *,\n verbose: bool = False,\n) -> None:\n \"\"\"Generate a conda environment.yaml file or print to stdout.\"\"\"\n resolved_dependencies = deepcopy(env_spec.conda)\n if env_spec.pip:\n resolved_dependencies.append({\"pip\": env_spec.pip}) # type: ignore[arg-type, dict-item]\n env_data = CommentedMap({\"name\": name})\n if env_spec.channels:\n env_data[\"channels\"] = env_spec.channels\n if resolved_dependencies:\n env_data[\"dependencies\"] = resolved_dependencies\n if env_spec.platforms:\n env_data[\"platforms\"] = env_spec.platforms\n yaml = YAML(typ=\"rt\")\n yaml.default_flow_style = False\n yaml.width = 4096\n yaml.indent(mapping=2, sequence=2, offset=2)\n if output_file:\n if verbose:\n print(f\"📝 Generating environment file at `{output_file}`\")\n with open(output_file, \"w\") as f: # noqa: PTH123\n yaml.dump(env_data, f)\n if verbose:\n print(\"📝 Environment file generated successfully.\")\n add_comment_to_file(output_file)\n else:\n yaml.dump(env_data, sys.stdout)"
},
{
"identifier": "conda_lock_command",
"path": "unidep/_conda_lock.py",
"snippet": "def conda_lock_command(\n *,\n depth: int,\n directory: Path,\n platform: list[Platform],\n verbose: bool,\n only_global: bool,\n check_input_hash: bool,\n ignore_pins: list[str],\n skip_dependencies: list[str],\n overwrite_pins: list[str],\n lockfile: str = \"conda-lock.yml\",\n) -> None:\n \"\"\"Generate a conda-lock file a collection of `requirements.yaml` and/or `pyproject.toml` files.\"\"\" # noqa: E501\n conda_lock_output = _conda_lock_global(\n depth=depth,\n directory=directory,\n platform=platform,\n verbose=verbose,\n check_input_hash=check_input_hash,\n ignore_pins=ignore_pins,\n overwrite_pins=overwrite_pins,\n skip_dependencies=skip_dependencies,\n lockfile=lockfile,\n )\n if only_global:\n return\n sub_lock_files = _conda_lock_subpackages(\n directory=directory,\n depth=depth,\n conda_lock_file=conda_lock_output,\n )\n mismatches = _check_consistent_lock_files(\n global_lock_file=conda_lock_output,\n sub_lock_files=sub_lock_files,\n )\n if not mismatches:\n print(\"✅ Analyzed all lock files and found no inconsistencies.\")\n elif len(mismatches) > 1: # pragma: no cover\n print(\"❌ Complete table of package version mismatches:\")\n _mismatch_report(mismatches, raises=False)"
},
{
"identifier": "resolve_conflicts",
"path": "unidep/_conflicts.py",
"snippet": "def resolve_conflicts(\n requirements: dict[str, list[Spec]],\n platforms: list[Platform] | None = None,\n) -> dict[str, dict[Platform | None, dict[CondaPip, Spec]]]:\n \"\"\"Resolve conflicts in a dictionary of requirements.\n\n Uses the ``ParsedRequirements.requirements`` dict returned by\n `parse_requirements`.\n \"\"\"\n if platforms and not set(platforms).issubset(get_args(Platform)):\n msg = f\"Invalid platform: {platforms}, must contain only {get_args(Platform)}\"\n raise VersionConflictError(msg)\n\n prepared = _prepare_specs_for_conflict_resolution(requirements)\n for data in prepared.values():\n _pop_unused_platforms_and_maybe_expand_none(data, platforms)\n resolved = {\n pkg: _combine_pinning_within_platform(data) for pkg, data in prepared.items()\n }\n\n for _platforms in resolved.values():\n for _platform, sources in _platforms.items():\n _platforms[_platform] = _resolve_conda_pip_conflicts(sources)\n return resolved"
},
{
"identifier": "find_requirements_files",
"path": "unidep/_dependencies_parsing.py",
"snippet": "def find_requirements_files(\n base_dir: str | Path = \".\",\n depth: int = 1,\n *,\n verbose: bool = False,\n) -> list[Path]:\n \"\"\"Scan a directory for `requirements.yaml` and `pyproject.toml` files.\"\"\"\n base_path = Path(base_dir)\n found_files = []\n\n # Define a helper function to recursively scan directories\n def _scan_dir(path: Path, current_depth: int) -> None:\n if verbose:\n print(f\"🔍 Scanning in `{path}` at depth {current_depth}\")\n if current_depth > depth:\n return\n for child in path.iterdir():\n if child.is_dir():\n _scan_dir(child, current_depth + 1)\n elif child.name == \"requirements.yaml\":\n found_files.append(child)\n if verbose:\n print(f'🔍 Found `\"requirements.yaml\"` at `{child}`')\n elif child.name == \"pyproject.toml\" and unidep_configured_in_toml(child):\n if verbose:\n print(f'🔍 Found `\"pyproject.toml\"` with dependencies at `{child}`')\n found_files.append(child)\n\n _scan_dir(base_path, 0)\n return sorted(found_files)"
},
{
"identifier": "parse_local_dependencies",
"path": "unidep/_dependencies_parsing.py",
"snippet": "def parse_local_dependencies(\n *paths: Path,\n check_pip_installable: bool = True,\n verbose: bool = False,\n) -> dict[Path, list[Path]]:\n \"\"\"Extract local project dependencies from a list of `requirements.yaml` or `pyproject.toml` files.\n\n Works by loading the specified `local_dependencies` list.\n \"\"\" # noqa: E501\n dependencies: dict[str, set[str]] = defaultdict(set)\n\n for p in paths:\n if verbose:\n print(f\"🔗 Analyzing dependencies in `{p}`\")\n base_path = p.resolve().parent\n _extract_local_dependencies(\n path=p,\n base_path=base_path,\n processed=set(),\n dependencies=dependencies,\n check_pip_installable=check_pip_installable,\n verbose=verbose,\n )\n\n return {\n Path(k): sorted({Path(v) for v in v_set})\n for k, v_set in sorted(dependencies.items())\n }"
},
{
"identifier": "parse_requirements",
"path": "unidep/_dependencies_parsing.py",
"snippet": "def parse_requirements( # noqa: PLR0912\n *paths: Path,\n ignore_pins: list[str] | None = None,\n overwrite_pins: list[str] | None = None,\n skip_dependencies: list[str] | None = None,\n verbose: bool = False,\n) -> ParsedRequirements:\n \"\"\"Parse a list of `requirements.yaml` or `pyproject.toml` files.\"\"\"\n ignore_pins = ignore_pins or []\n skip_dependencies = skip_dependencies or []\n overwrite_pins_map = _parse_overwrite_pins(overwrite_pins or [])\n requirements: dict[str, list[Spec]] = defaultdict(list)\n channels: set[str] = set()\n platforms: set[Platform] = set()\n datas = []\n seen: set[Path] = set()\n yaml = YAML(typ=\"rt\")\n for p in paths:\n if verbose:\n print(f\"📄 Parsing `{p}`\")\n data = _load(p, yaml)\n datas.append(data)\n seen.add(p.resolve())\n\n # Handle \"local_dependencies\" (or old name \"includes\", changed in 0.42.0)\n for include in _get_local_dependencies(data):\n try:\n requirements_path = dependencies_filename(p.parent / include).resolve()\n except FileNotFoundError:\n # Means that this is a local package that is not managed by unidep.\n # We do not need to do anything here, just in `unidep install`.\n continue\n if requirements_path in seen:\n continue # Avoids circular local_dependencies\n if verbose:\n print(f\"📄 Parsing `{include}` from `local_dependencies`\")\n datas.append(_load(requirements_path, yaml))\n seen.add(requirements_path)\n\n identifier = -1\n for data in datas:\n for channel in data.get(\"channels\", []):\n channels.add(channel)\n for _platform in data.get(\"platforms\", []):\n platforms.add(_platform)\n if \"dependencies\" not in data:\n continue\n dependencies = data[\"dependencies\"]\n for i, dep in enumerate(data[\"dependencies\"]):\n identifier += 1\n if isinstance(dep, str):\n specs = _parse_dependency(\n dep,\n dependencies,\n i,\n \"both\",\n identifier,\n ignore_pins,\n overwrite_pins_map,\n skip_dependencies,\n )\n for spec in specs:\n requirements[spec.name].append(spec)\n continue\n assert isinstance(dep, dict)\n for which in [\"conda\", \"pip\"]:\n if which in dep:\n specs = _parse_dependency(\n dep[which],\n dep,\n which,\n which, # type: ignore[arg-type]\n identifier,\n ignore_pins,\n overwrite_pins_map,\n skip_dependencies,\n )\n for spec in specs:\n requirements[spec.name].append(spec)\n\n return ParsedRequirements(sorted(channels), sorted(platforms), dict(requirements))"
},
{
"identifier": "filter_python_dependencies",
"path": "unidep/_setuptools_integration.py",
"snippet": "def filter_python_dependencies(\n resolved: dict[str, dict[Platform | None, dict[CondaPip, Spec]]],\n) -> list[str]:\n \"\"\"Filter out conda dependencies and return only pip dependencies.\n\n Examples\n --------\n >>> requirements = parse_requirements(\"requirements.yaml\")\n >>> resolved = resolve_conflicts(\n ... requirements.requirements, requirements.platforms\n ... )\n >>> python_deps = filter_python_dependencies(resolved)\n \"\"\"\n pip_deps = []\n for platform_data in resolved.values():\n to_process: dict[Platform | None, Spec] = {} # platform -> Spec\n for _platform, sources in platform_data.items():\n pip_spec = sources.get(\"pip\")\n if pip_spec:\n to_process[_platform] = pip_spec\n if not to_process:\n continue\n\n # Check if all Spec objects are identical\n first_spec = next(iter(to_process.values()))\n if all(spec == first_spec for spec in to_process.values()):\n # Build a single combined environment marker\n dep_str = first_spec.name_with_pin(is_pip=True)\n if _platform is not None:\n selector = build_pep508_environment_marker(list(to_process.keys())) # type: ignore[arg-type]\n dep_str = f\"{dep_str}; {selector}\"\n pip_deps.append(dep_str)\n continue\n\n for _platform, pip_spec in to_process.items():\n dep_str = pip_spec.name_with_pin(is_pip=True)\n if _platform is not None:\n selector = build_pep508_environment_marker([_platform])\n dep_str = f\"{dep_str}; {selector}\"\n pip_deps.append(dep_str)\n return sorted(pip_deps)"
},
{
"identifier": "get_python_dependencies",
"path": "unidep/_setuptools_integration.py",
"snippet": "def get_python_dependencies(\n filename: str\n | Path\n | Literal[\"requirements.yaml\", \"pyproject.toml\"] = \"requirements.yaml\", # noqa: PYI051\n *,\n verbose: bool = False,\n ignore_pins: list[str] | None = None,\n overwrite_pins: list[str] | None = None,\n skip_dependencies: list[str] | None = None,\n platforms: list[Platform] | None = None,\n raises_if_missing: bool = True,\n) -> list[str]:\n \"\"\"Extract Python (pip) requirements from a `requirements.yaml` or `pyproject.toml` file.\"\"\" # noqa: E501\n p = Path(filename)\n if not p.exists():\n if raises_if_missing:\n msg = f\"File {filename} not found.\"\n raise FileNotFoundError(msg)\n return []\n\n requirements = parse_requirements(\n p,\n ignore_pins=ignore_pins,\n overwrite_pins=overwrite_pins,\n skip_dependencies=skip_dependencies,\n verbose=verbose,\n )\n resolved = resolve_conflicts(\n requirements.requirements,\n platforms or list(requirements.platforms),\n )\n return filter_python_dependencies(resolved)"
},
{
"identifier": "__version__",
"path": "unidep/_version.py",
"snippet": ""
},
{
"identifier": "Platform",
"path": "unidep/platform_definitions.py",
"snippet": "VALID_SELECTORS = get_args(Selector)\nPEP508_MARKERS = {\n \"linux-64\": \"sys_platform == 'linux' and platform_machine == 'x86_64'\",\n \"linux-aarch64\": \"sys_platform == 'linux' and platform_machine == 'aarch64'\",\n \"linux-ppc64le\": \"sys_platform == 'linux' and platform_machine == 'ppc64le'\",\n \"osx-64\": \"sys_platform == 'darwin' and platform_machine == 'x86_64'\",\n \"osx-arm64\": \"sys_platform == 'darwin' and platform_machine == 'arm64'\",\n \"win-64\": \"sys_platform == 'win32' and platform_machine == 'AMD64'\",\n (\"linux-64\", \"linux-aarch64\", \"linux-ppc64le\"): \"sys_platform == 'linux'\",\n (\"osx-64\", \"osx-arm64\"): \"sys_platform == 'darwin'\",\n (\n \"linux-64\",\n \"linux-aarch64\",\n \"linux-ppc64le\",\n \"osx-64\",\n \"osx-arm64\",\n ): \"sys_platform == 'linux' or sys_platform == 'darwin'\",\n}\nPLATFORM_SELECTOR_MAP: dict[Platform, list[Selector]] = {\n \"linux-64\": [\"linux64\", \"unix\", \"linux\"],\n \"linux-aarch64\": [\"aarch64\", \"unix\", \"linux\"],\n \"linux-ppc64le\": [\"ppc64le\", \"unix\", \"linux\"],\n # \"osx64\" is a selector unique to conda-build referring to\n # platforms on macOS and the Python architecture is x86-64\n \"osx-64\": [\"osx64\", \"osx\", \"macos\", \"unix\"],\n \"osx-arm64\": [\"arm64\", \"osx\", \"macos\", \"unix\"],\n \"win-64\": [\"win64\", \"win\"],\n}\nPLATFORM_SELECTOR_MAP_REVERSE: dict[Selector, set[Platform]] = {}\ndef validate_selector(selector: Selector) -> None:\ndef platforms_from_selector(selector: str) -> list[Platform]:\n def platforms(self) -> list[Platform] | None:\n def pprint(self) -> str:\n def name_with_pin(self, *, is_pip: bool = False) -> str:\nclass Spec(NamedTuple):"
},
{
"identifier": "add_comment_to_file",
"path": "unidep/utils.py",
"snippet": "def add_comment_to_file(\n filename: str | Path,\n extra_lines: list[str] | None = None,\n) -> None:\n \"\"\"Add a comment to the top of a file.\"\"\"\n if extra_lines is None:\n extra_lines = []\n with open(filename, \"r+\") as f: # noqa: PTH123\n content = f.read()\n f.seek(0, 0)\n command_line_args = \" \".join(sys.argv[1:])\n txt = [\n f\"# This file is created and managed by `unidep` {__version__}.\",\n \"# For details see https://github.com/basnijholt/unidep\",\n f\"# File generated with: `unidep {command_line_args}`\",\n *extra_lines,\n ]\n content = \"\\n\".join(txt) + \"\\n\\n\" + content\n f.write(content)"
},
{
"identifier": "dependencies_filename",
"path": "unidep/utils.py",
"snippet": "def dependencies_filename(folder_or_path: str | Path) -> Path:\n \"\"\"Get the path to `requirements.yaml` or `pyproject.toml` file.\"\"\"\n path = Path(folder_or_path)\n if path.is_dir():\n fname_yaml = path / \"requirements.yaml\"\n if fname_yaml.exists():\n return fname_yaml\n fname_toml = path / \"pyproject.toml\"\n if fname_toml.exists() and unidep_configured_in_toml(fname_toml):\n return fname_toml\n msg = (\n f\"File `{fname_yaml}` or `{fname_toml}` (with unidep configuration)\"\n f\" not found in `{folder_or_path}`.\"\n )\n raise FileNotFoundError(msg)\n if not path.exists():\n msg = f\"File `{path}` not found.\"\n raise FileNotFoundError(msg)\n return path"
},
{
"identifier": "escape_unicode",
"path": "unidep/utils.py",
"snippet": "def escape_unicode(string: str) -> str:\n \"\"\"Escape unicode characters.\"\"\"\n return codecs.decode(string, \"unicode_escape\")"
},
{
"identifier": "identify_current_platform",
"path": "unidep/utils.py",
"snippet": "def identify_current_platform() -> Platform:\n \"\"\"Detect the current platform.\"\"\"\n system = platform.system().lower()\n architecture = platform.machine().lower()\n\n if system == \"linux\":\n if architecture == \"x86_64\":\n return \"linux-64\"\n if architecture == \"aarch64\":\n return \"linux-aarch64\"\n if architecture == \"ppc64le\":\n return \"linux-ppc64le\"\n msg = f\"Unsupported Linux architecture `{architecture}`\"\n raise UnsupportedPlatformError(msg)\n if system == \"darwin\":\n if architecture == \"x86_64\":\n return \"osx-64\"\n if architecture == \"arm64\":\n return \"osx-arm64\"\n msg = f\"Unsupported macOS architecture `{architecture}`\"\n raise UnsupportedPlatformError(msg)\n if system == \"windows\":\n if \"64\" in architecture:\n return \"win-64\"\n msg = f\"Unsupported Windows architecture `{architecture}`\"\n raise UnsupportedPlatformError(msg)\n msg = f\"Unsupported operating system `{system}` with architecture `{architecture}`\"\n raise UnsupportedPlatformError(msg)"
},
{
"identifier": "is_pip_installable",
"path": "unidep/utils.py",
"snippet": "def is_pip_installable(folder: str | Path) -> bool: # pragma: no cover\n \"\"\"Determine if the project is pip installable.\n\n Checks for existence of setup.py or [build-system] in pyproject.toml.\n \"\"\"\n path = Path(folder)\n if (path / \"setup.py\").exists():\n return True\n\n # When toml makes it into the standard library, we can use that instead\n # For now this is good enough, except it doesn't handle the case where\n # [build-system] is inside of a multi-line literal string.\n pyproject_path = path / \"pyproject.toml\"\n if pyproject_path.exists():\n with pyproject_path.open(\"r\") as file:\n for line in file:\n if line.strip().startswith(\"[build-system]\"):\n return True\n return False"
},
{
"identifier": "parse_package_str",
"path": "unidep/utils.py",
"snippet": "def parse_package_str(package_str: str) -> ParsedPackageStr:\n \"\"\"Splits a string into package name, version pinning, and platform selector.\"\"\"\n # Regex to match package name, version pinning, and optionally platform selector\n name_pattern = r\"[a-zA-Z0-9_-]+\"\n version_pin_pattern = r\".*?\"\n selector_pattern = r\"[a-z0-9\\s]+\"\n pattern = rf\"({name_pattern})\\s*({version_pin_pattern})?(:({selector_pattern}))?$\"\n match = re.match(pattern, package_str)\n\n if match:\n package_name = match.group(1).strip()\n version_pin = match.group(2).strip() if match.group(2) else None\n selector = match.group(4).strip() if match.group(4) else None\n\n if selector is not None:\n for s in selector.split():\n validate_selector(cast(Selector, s))\n\n return ParsedPackageStr(\n package_name,\n version_pin,\n selector,\n )\n\n msg = f\"Invalid package string: '{package_str}'\"\n raise ValueError(msg)"
},
{
"identifier": "warn",
"path": "unidep/utils.py",
"snippet": "def warn(\n message: str | Warning,\n category: type[Warning] = UserWarning,\n stacklevel: int = 1,\n) -> None:\n \"\"\"Emit a warning with a custom format specific to this package.\"\"\"\n original_format = warnings.formatwarning\n warnings.formatwarning = _simple_warning_format\n try:\n warnings.warn(message, category, stacklevel=stacklevel + 1)\n finally:\n warnings.formatwarning = original_format"
}
] | import argparse
import importlib.util
import os
import shutil
import subprocess
import sys
from pathlib import Path
from unidep._conda_env import (
create_conda_env_specification,
write_conda_environment_file,
)
from unidep._conda_lock import conda_lock_command
from unidep._conflicts import resolve_conflicts
from unidep._dependencies_parsing import (
find_requirements_files,
parse_local_dependencies,
parse_requirements,
)
from unidep._setuptools_integration import (
filter_python_dependencies,
get_python_dependencies,
)
from unidep._version import __version__
from unidep.platform_definitions import Platform
from unidep.utils import (
add_comment_to_file,
dependencies_filename,
escape_unicode,
identify_current_platform,
is_pip_installable,
parse_package_str,
warn,
)
from typing import Literal, get_args
from typing_extensions import Literal, get_args
from rich_argparse import RichHelpFormatter
from argparse import HelpFormatter as _HelpFormatter # type: ignore[assignment] | 8,256 | def _identify_conda_executable() -> str: # pragma: no cover
"""Identify the conda executable to use.
This function checks for micromamba, mamba, and conda in that order.
"""
if shutil.which("micromamba"):
return "micromamba"
if shutil.which("mamba"):
return "mamba"
if shutil.which("conda"):
return "conda"
msg = "Could not identify conda executable."
raise RuntimeError(msg)
def _format_inline_conda_package(package: str) -> str:
pkg = parse_package_str(package)
if pkg.pin is None:
return pkg.name
return f'{pkg.name}"{pkg.pin.strip()}"'
def _pip_install_local(
*folders: str | Path,
editable: bool,
dry_run: bool,
flags: list[str] | None = None,
) -> None: # pragma: no cover
pip_command = [sys.executable, "-m", "pip", "install"]
if flags:
pip_command.extend(flags)
for folder in sorted(folders):
if not os.path.isabs(folder): # noqa: PTH117
relative_prefix = ".\\" if os.name == "nt" else "./"
folder = f"{relative_prefix}{folder}" # noqa: PLW2901
if editable:
pip_command.extend(["-e", str(folder)])
else:
pip_command.append(str(folder))
print(f"📦 Installing project with `{' '.join(pip_command)}`\n")
if not dry_run:
subprocess.run(pip_command, check=True) # noqa: S603
def _install_command( # noqa: PLR0912
*files: Path,
conda_executable: str,
dry_run: bool,
editable: bool,
skip_local: bool = False,
skip_pip: bool = False,
skip_conda: bool = False,
no_dependencies: bool = False,
ignore_pins: list[str] | None = None,
overwrite_pins: list[str] | None = None,
skip_dependencies: list[str] | None = None,
verbose: bool = False,
) -> None:
"""Install the dependencies of a single `requirements.yaml` or `pyproject.toml` file.""" # noqa: E501
if no_dependencies:
skip_pip = True
skip_conda = True
files = tuple(dependencies_filename(f) for f in files)
requirements = parse_requirements(
*files,
ignore_pins=ignore_pins,
overwrite_pins=overwrite_pins,
skip_dependencies=skip_dependencies,
verbose=verbose,
)
platforms = [identify_current_platform()]
resolved = resolve_conflicts(
requirements.requirements,
platforms,
)
env_spec = create_conda_env_specification(
resolved,
requirements.channels,
platforms=platforms,
)
if env_spec.conda and not skip_conda:
conda_executable = conda_executable or _identify_conda_executable()
channel_args = ["--override-channels"] if env_spec.channels else []
for channel in env_spec.channels:
channel_args.extend(["--channel", channel])
conda_command = [
conda_executable,
"install",
"--yes",
*channel_args,
]
# When running the command in a terminal, we need to wrap the pin in quotes
# so that what we print is what the user would type (copy-paste).
to_print = [_format_inline_conda_package(pkg) for pkg in env_spec.conda] # type: ignore[arg-type]
conda_command_str = " ".join((*conda_command, *to_print))
print(f"📦 Installing conda dependencies with `{conda_command_str}`\n") # type: ignore[arg-type]
if not dry_run: # pragma: no cover
subprocess.run((*conda_command, *env_spec.conda), check=True) # type: ignore[arg-type] # noqa: S603
if env_spec.pip and not skip_pip:
pip_command = [sys.executable, "-m", "pip", "install", *env_spec.pip]
print(f"📦 Installing pip dependencies with `{' '.join(pip_command)}`\n")
if not dry_run: # pragma: no cover
subprocess.run(pip_command, check=True) # noqa: S603
installable = []
if not skip_local:
for file in files:
if is_pip_installable(file.parent):
installable.append(file.parent)
else: # pragma: no cover
print(
f"⚠️ Project {file.parent} is not pip installable. "
"Could not find setup.py or [build-system] in pyproject.toml.",
)
# Install local dependencies (if any) included via `local_dependencies:`
| #!/usr/bin/env python3
"""unidep - Unified Conda and Pip requirements management.
This module provides a command-line tool for managing conda environment.yaml files.
"""
from __future__ import annotations
if sys.version_info >= (3, 8):
else: # pragma: no cover
try: # pragma: no cover
class _HelpFormatter(RichHelpFormatter):
def _get_help_string(self, action: argparse.Action) -> str | None:
# escapes "[" in text, otherwise e.g., [linux] is removed
if action.help is not None:
return action.help.replace("[", r"\[")
return None
except ImportError: # pragma: no cover
_DEP_FILES = "`requirements.yaml` or `pyproject.toml`"
def _add_common_args( # noqa: PLR0912
sub_parser: argparse.ArgumentParser,
options: set[str],
) -> None: # pragma: no cover
if "directory" in options:
sub_parser.add_argument(
"-d",
"--directory",
type=Path,
default=".",
help=f"Base directory to scan for {_DEP_FILES} file(s), by default `.`",
)
if "file" in options:
sub_parser.add_argument(
"-f",
"--file",
type=Path,
default=".",
help=f"The {_DEP_FILES} file to parse, or folder"
" that contains that file, by default `.`",
)
if "verbose" in options:
sub_parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Print verbose output",
)
if "platform" in options:
current_platform = identify_current_platform()
sub_parser.add_argument(
"--platform",
"-p",
type=str,
action="append", # Allow multiple instances of -p
default=None, # Default is a list with the current platform set in `main`
choices=get_args(Platform),
help="The platform(s) to get the requirements for. "
"Multiple platforms can be specified. "
f"By default, the current platform (`{current_platform}`) is used.",
)
if "editable" in options:
sub_parser.add_argument(
"-e",
"--editable",
action="store_true",
help="Install the project in editable mode",
)
if "depth" in options:
sub_parser.add_argument(
"--depth",
type=int,
default=1,
help=f"Maximum depth to scan for {_DEP_FILES} files, by default 1",
)
if "*files" in options:
sub_parser.add_argument(
"files",
type=Path,
nargs="+",
help=f"The {_DEP_FILES} file(s) to parse"
" or folder(s) that contain"
" those file(s), by default `.`",
default=None, # default is "." set in `main`
)
if "skip-local" in options:
sub_parser.add_argument(
"--skip-local",
action="store_true",
help="Skip installing local dependencies",
)
if "skip-pip" in options:
sub_parser.add_argument(
"--skip-pip",
action="store_true",
help=f"Skip installing pip dependencies from {_DEP_FILES}",
)
if "skip-conda" in options:
sub_parser.add_argument(
"--skip-conda",
action="store_true",
help=f"Skip installing conda dependencies from {_DEP_FILES}",
)
if "skip-dependency" in options:
sub_parser.add_argument(
"--skip-dependency",
type=str,
action="append",
default=[],
help="Skip installing a specific dependency that is in one of the"
f" {_DEP_FILES}"
" files. This option can be used multiple times, each"
" time specifying a different package to skip."
" For example, use `--skip-dependency pandas` to skip installing pandas.",
)
if "no-dependencies" in options:
sub_parser.add_argument(
"--no-dependencies",
action="store_true",
help=f"Skip installing dependencies from {_DEP_FILES}"
" file(s) and only install local package(s). Useful after"
" installing a `conda-lock.yml` file because then all"
" dependencies have already been installed.",
)
if "conda-executable" in options:
sub_parser.add_argument(
"--conda-executable",
type=str,
choices=("conda", "mamba", "micromamba"),
help="The conda executable to use",
default=None,
)
if "dry-run" in options:
sub_parser.add_argument(
"--dry-run",
"--dry",
action="store_true",
help="Only print the commands that would be run",
)
if "ignore-pin" in options:
sub_parser.add_argument(
"--ignore-pin",
type=str,
action="append",
default=[],
help="Ignore the version pin for a specific package,"
" e.g., `--ignore-pin numpy`. This option can be repeated"
" to ignore multiple packages.",
)
if "overwrite-pin" in options:
sub_parser.add_argument(
"--overwrite-pin",
type=str,
action="append",
default=[],
help="Overwrite the version pin for a specific package,"
" e.g., `--overwrite-pin 'numpy==1.19.2'`. This option can be repeated"
" to overwrite the pins of multiple packages.",
)
def _parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Unified Conda and Pip requirements management.",
formatter_class=_HelpFormatter,
)
subparsers = parser.add_subparsers(dest="command", help="Subcommands")
# Subparser for the 'merge' command
merge_help = (
f"Combine multiple (or a single) {_DEP_FILES}"
" files into a"
" single Conda installable `environment.yaml` file."
)
merge_example = (
" Example usage: `unidep merge --directory . --depth 1 --output environment.yaml`" # noqa: E501
f" to search for {_DEP_FILES}"
" files in the current directory and its"
" subdirectories and create `environment.yaml`. These are the defaults, so you"
" can also just run `unidep merge`."
)
parser_merge = subparsers.add_parser(
"merge",
help=merge_help,
description=merge_help + merge_example,
formatter_class=_HelpFormatter,
)
parser_merge.add_argument(
"-o",
"--output",
type=Path,
default="environment.yaml",
help="Output file for the conda environment, by default `environment.yaml`",
)
parser_merge.add_argument(
"-n",
"--name",
type=str,
default="myenv",
help="Name of the conda environment, by default `myenv`",
)
parser_merge.add_argument(
"--stdout",
action="store_true",
help="Output to stdout instead of a file",
)
parser_merge.add_argument(
"--selector",
type=str,
choices=("sel", "comment"),
default="sel",
help="The selector to use for the environment markers, if `sel` then"
" `- numpy # [linux]` becomes `sel(linux): numpy`, if `comment` then"
" it remains `- numpy # [linux]`, by default `sel`",
)
_add_common_args(
parser_merge,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
# Subparser for the 'install' command
install_help = (
f"Automatically install all dependencies from one or more {_DEP_FILES} files."
" This command first installs dependencies"
" with Conda, then with Pip. Finally, it installs local packages"
f" (those containing the {_DEP_FILES} files)"
" using `pip install [-e] ./project`."
)
install_example = (
" Example usage: `unidep install .` for a single project."
" For multiple projects: `unidep install ./project1 ./project2`."
" The command accepts both file paths and directories containing"
f" a {_DEP_FILES} file. Use `--editable` or"
" `-e` to install the local packages in editable mode. See"
f" `unidep install-all` to install all {_DEP_FILES} files in and below the"
" current folder."
)
parser_install = subparsers.add_parser(
"install",
help=install_help,
description=install_help + install_example,
formatter_class=_HelpFormatter,
)
# Add positional argument for the file
_add_common_args(
parser_install,
{
"*files",
"conda-executable",
"dry-run",
"editable",
"skip-local",
"skip-pip",
"skip-conda",
"no-dependencies",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
"verbose",
},
)
install_all_help = (
f"Install dependencies from all {_DEP_FILES}"
" files found in the current"
" directory or specified directory. This command first installs dependencies"
" using Conda, then Pip, and finally the local packages."
)
install_all_example = (
" Example usage: `unidep install-all` to install dependencies from all"
f" {_DEP_FILES}"
" files in the current directory. Use"
" `--directory ./path/to/dir` to specify a different directory. Use"
" `--depth` to control the depth of directory search. Add `--editable`"
" or `-e` for installing local packages in editable mode."
)
parser_install_all = subparsers.add_parser(
"install-all",
help=install_all_help,
description=install_all_help + install_all_example,
formatter_class=_HelpFormatter,
)
# Add positional argument for the file
_add_common_args(
parser_install_all,
{
"conda-executable",
"dry-run",
"editable",
"depth",
"directory",
"skip-local",
"skip-pip",
"skip-conda",
"no-dependencies",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
"verbose",
},
)
# Subparser for the 'conda-lock' command
conda_lock_help = (
"Generate a global `conda-lock.yml` file for a collection of"
f" {_DEP_FILES}"
" files. Additionally, create individual"
f" `conda-lock.yml` files for each {_DEP_FILES} file"
" consistent with the global lock file."
)
conda_lock_example = (
" Example usage: `unidep conda-lock --directory ./projects` to generate"
f" conda-lock files for all {_DEP_FILES}"
" files in the `./projects`"
" directory. Use `--only-global` to generate only the global lock file."
" The `--check-input-hash` option can be used to avoid regenerating lock"
" files if the input hasn't changed."
)
parser_lock = subparsers.add_parser(
"conda-lock",
help=conda_lock_help,
description=conda_lock_help + conda_lock_example,
formatter_class=_HelpFormatter,
)
parser_lock.add_argument(
"--only-global",
action="store_true",
help="Only generate the global lock file",
)
parser_lock.add_argument(
"--lockfile",
type=Path,
default="conda-lock.yml",
help="Specify a path for the global lockfile (default: `conda-lock.yml`"
" in current directory). Path should be relative, e.g.,"
" `--lockfile ./locks/example.conda-lock.yml`.",
)
parser_lock.add_argument(
"--check-input-hash",
action="store_true",
help="Check existing input hashes in lockfiles before regenerating lock files."
" This flag is directly passed to `conda-lock`.",
)
_add_common_args(
parser_lock,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
# Subparser for the 'pip-compile' command
pip_compile_help = (
"Generate a fully pinned `requirements.txt` file from one or more"
f" {_DEP_FILES}"
" files using `pip-compile` from `pip-tools`. This"
f" command consolidates all pip dependencies defined in the {_DEP_FILES}"
" files and compiles them into a single `requirements.txt` file, taking"
" into account the specific versions and dependencies of each package."
)
pip_compile_example = (
" Example usage: `unidep pip-compile --directory ./projects` to generate"
f" a `requirements.txt` file for all {_DEP_FILES}"
" files in the"
" `./projects` directory. Use `--output-file requirements.txt` to specify a"
" different output file."
)
parser_pip_compile = subparsers.add_parser(
"pip-compile",
help=pip_compile_help,
description=pip_compile_help + pip_compile_example,
formatter_class=_HelpFormatter,
)
parser_pip_compile.add_argument(
"-o",
"--output-file",
type=Path,
default=None,
help="Output file for the pip requirements, by default `requirements.txt`",
)
_add_common_args(
parser_pip_compile,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
parser_pip_compile.add_argument(
"extra_flags",
nargs=argparse.REMAINDER,
help="Extra flags to pass to `pip-compile`. These flags are passed directly"
" and should be provided in the format expected by `pip-compile`. For example,"
" `unidep pip-compile -- --generate-hashes --allow-unsafe`. Note that the"
" `--` is required to separate the flags for `unidep` from the flags for"
" `pip-compile`.",
)
# Subparser for the 'pip' and 'conda' command
help_str = "Get the {} requirements for the current platform only."
help_example = (
" Example usage: `unidep {which} --file folder1 --file"
" folder2/requirements.yaml --seperator ' ' --platform linux-64` to"
" extract all the {which} dependencies specific to the linux-64 platform. Note"
" that the `--file` argument can be used multiple times to specify multiple"
f" {_DEP_FILES}"
" files and that --file can also be a folder that contains"
f" a {_DEP_FILES} file."
)
parser_pip = subparsers.add_parser(
"pip",
help=help_str.format("pip"),
description=help_str.format("pip") + help_example.format(which="pip"),
formatter_class=_HelpFormatter,
)
parser_conda = subparsers.add_parser(
"conda",
help=help_str.format("conda"),
description=help_str.format("conda") + help_example.format(which="conda"),
formatter_class=_HelpFormatter,
)
for sub_parser in [parser_pip, parser_conda]:
_add_common_args(
sub_parser,
{
"verbose",
"platform",
"file",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
sub_parser.add_argument(
"--separator",
type=str,
default=" ",
help="The separator between the dependencies, by default ` `",
)
# Subparser for the 'version' command
parser_merge = subparsers.add_parser(
"version",
help="Print version information of unidep.",
formatter_class=_HelpFormatter,
)
args = parser.parse_args()
if args.command is None: # pragma: no cover
parser.print_help()
sys.exit(1)
if "file" in args and args.file.is_dir(): # pragma: no cover
args.file = dependencies_filename(args.file)
return args
def _identify_conda_executable() -> str: # pragma: no cover
"""Identify the conda executable to use.
This function checks for micromamba, mamba, and conda in that order.
"""
if shutil.which("micromamba"):
return "micromamba"
if shutil.which("mamba"):
return "mamba"
if shutil.which("conda"):
return "conda"
msg = "Could not identify conda executable."
raise RuntimeError(msg)
def _format_inline_conda_package(package: str) -> str:
pkg = parse_package_str(package)
if pkg.pin is None:
return pkg.name
return f'{pkg.name}"{pkg.pin.strip()}"'
def _pip_install_local(
*folders: str | Path,
editable: bool,
dry_run: bool,
flags: list[str] | None = None,
) -> None: # pragma: no cover
pip_command = [sys.executable, "-m", "pip", "install"]
if flags:
pip_command.extend(flags)
for folder in sorted(folders):
if not os.path.isabs(folder): # noqa: PTH117
relative_prefix = ".\\" if os.name == "nt" else "./"
folder = f"{relative_prefix}{folder}" # noqa: PLW2901
if editable:
pip_command.extend(["-e", str(folder)])
else:
pip_command.append(str(folder))
print(f"📦 Installing project with `{' '.join(pip_command)}`\n")
if not dry_run:
subprocess.run(pip_command, check=True) # noqa: S603
def _install_command( # noqa: PLR0912
*files: Path,
conda_executable: str,
dry_run: bool,
editable: bool,
skip_local: bool = False,
skip_pip: bool = False,
skip_conda: bool = False,
no_dependencies: bool = False,
ignore_pins: list[str] | None = None,
overwrite_pins: list[str] | None = None,
skip_dependencies: list[str] | None = None,
verbose: bool = False,
) -> None:
"""Install the dependencies of a single `requirements.yaml` or `pyproject.toml` file.""" # noqa: E501
if no_dependencies:
skip_pip = True
skip_conda = True
files = tuple(dependencies_filename(f) for f in files)
requirements = parse_requirements(
*files,
ignore_pins=ignore_pins,
overwrite_pins=overwrite_pins,
skip_dependencies=skip_dependencies,
verbose=verbose,
)
platforms = [identify_current_platform()]
resolved = resolve_conflicts(
requirements.requirements,
platforms,
)
env_spec = create_conda_env_specification(
resolved,
requirements.channels,
platforms=platforms,
)
if env_spec.conda and not skip_conda:
conda_executable = conda_executable or _identify_conda_executable()
channel_args = ["--override-channels"] if env_spec.channels else []
for channel in env_spec.channels:
channel_args.extend(["--channel", channel])
conda_command = [
conda_executable,
"install",
"--yes",
*channel_args,
]
# When running the command in a terminal, we need to wrap the pin in quotes
# so that what we print is what the user would type (copy-paste).
to_print = [_format_inline_conda_package(pkg) for pkg in env_spec.conda] # type: ignore[arg-type]
conda_command_str = " ".join((*conda_command, *to_print))
print(f"📦 Installing conda dependencies with `{conda_command_str}`\n") # type: ignore[arg-type]
if not dry_run: # pragma: no cover
subprocess.run((*conda_command, *env_spec.conda), check=True) # type: ignore[arg-type] # noqa: S603
if env_spec.pip and not skip_pip:
pip_command = [sys.executable, "-m", "pip", "install", *env_spec.pip]
print(f"📦 Installing pip dependencies with `{' '.join(pip_command)}`\n")
if not dry_run: # pragma: no cover
subprocess.run(pip_command, check=True) # noqa: S603
installable = []
if not skip_local:
for file in files:
if is_pip_installable(file.parent):
installable.append(file.parent)
else: # pragma: no cover
print(
f"⚠️ Project {file.parent} is not pip installable. "
"Could not find setup.py or [build-system] in pyproject.toml.",
)
# Install local dependencies (if any) included via `local_dependencies:` | local_dependencies = parse_local_dependencies( | 5 | 2023-11-16 04:23:01+00:00 | 12k |
BAAI-DCAI/SegVol | inference_demo.py | [
{
"identifier": "sam_model_registry",
"path": "segment_anything_volumetric/build_sam.py",
"snippet": "def build_sam_vit_3d(args, checkpoint=None):\ndef _build_sam(\n image_encoder_type,\n embed_dim,\n patch_size,\n checkpoint,\n image_size,\n):"
},
{
"identifier": "SegVol",
"path": "network/model.py",
"snippet": "class SegVol(nn.Module):\n def __init__(self, \n image_encoder, \n mask_decoder,\n prompt_encoder,\n clip_ckpt,\n roi_size,\n patch_size,\n test_mode=False,\n ):\n super().__init__()\n self.image_encoder = image_encoder\n self.mask_decoder = mask_decoder\n self.prompt_encoder = prompt_encoder\n self.text_encoder = TextEncoder(clip_ckpt)\n self.feat_shape = np.array(roi_size)/np.array(patch_size)\n self.test_mode = test_mode\n self.dice_loss = BinaryDiceLoss().cuda()\n self.bce_loss = BCELoss().cuda()\n self.decoder_iter = 6\n\n def forward(self, image, text=None, boxes=None, points=None, **kwargs):\n bs = image.shape[0]\n img_shape = (image.shape[2], image.shape[3], image.shape[4])\n image_embedding, _ = self.image_encoder(image)\n image_embedding = image_embedding.transpose(1, 2).view(bs, -1, \n int(self.feat_shape[0]), int(self.feat_shape[1]), int(self.feat_shape[2]))\n # test mode\n if self.test_mode:\n return self.forward_decoder(image_embedding, img_shape, text, boxes, points)\n \n # train mode\n ## sl\n sl_loss = self.supervised_forward(image, image_embedding, img_shape, kwargs['train_organs'], kwargs['train_labels'])\n ## ssl\n ssl_loss = self.unsupervised_forward(image, image_embedding, kwargs['pseudo_seg_cleaned'], img_shape)\n return sl_loss, ssl_loss\n\n def forward_decoder(self, image_embedding, img_shape, text=None, boxes=None, points=None):\n with torch.no_grad():\n if boxes is not None:\n if len(boxes.shape) == 2:\n boxes = boxes[:, None, :] # (B, 1, 6)\n if text is not None:\n text_embedding = self.text_encoder(text) # (B, 768)\n else:\n text_embedding = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=None,\n text_embedding=text_embedding,\n )\n\n dense_pe = self.prompt_encoder.get_dense_pe()\n low_res_masks, _ = self.mask_decoder(\n image_embeddings=image_embedding,\n text_embedding = text_embedding,\n image_pe=dense_pe,\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=False,\n )\n logits = F.interpolate(low_res_masks, size=img_shape, mode='trilinear', align_corners=False)\n return logits\n\n def supervised_forward(self, image, image_embedding, img_shape, training_organs, train_labels):\n iter_points, iter_bboxes, iter_organs = self.build_prompt_label(image.shape[0], training_organs, train_labels)\n # select prompt\n prompt_options = [[None, iter_points, iter_organs], [iter_bboxes, None, iter_organs], \n [None, None, iter_organs], [iter_bboxes, None, None], [None, iter_points, None],\n [iter_bboxes, iter_points, None]]\n sl_loss = 0\n for prompt in prompt_options:\n bboxes, points, organs = prompt\n logits = self.forward_decoder(image_embedding, img_shape, text=organs, boxes=bboxes, points=points)\n # cal loss\n sl_loss_dice = self.dice_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss_bce = self.bce_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss += sl_loss_dice + sl_loss_bce\n return sl_loss\n \n def unsupervised_forward(self, image, image_embedding, pseudo_seg_cleaned, img_shape):\n sll_loss = 0\n for iter in range(self.decoder_iter):\n if iter % 2 == 0:\n pseudo_labels, pseudo_points_prompt = self.build_pseudo_point_prompt_label(image.shape, pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=None, points=pseudo_points_prompt)\n else:\n pseudo_labels, pseudo_bboxes_prompt = self.build_pseudo_box_prompt_label(image.shape, 
pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=pseudo_bboxes_prompt, points=None)\n # cal loss\n sll_loss_dice = self.dice_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss_bce = self.bce_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss += sll_loss_dice + sll_loss_bce\n return sll_loss\n\n def build_prompt_label(self, bs, training_organs, train_labels):\n # generate prompt & label\n iter_organs = []\n iter_bboxes = []\n iter_points_ax = []\n iter_point_labels = []\n for sample_idx in range(bs):\n # organ prompt\n iter_organs.append(training_organs)\n # box prompt\n box = generate_box(train_labels[sample_idx])\n iter_bboxes.append(box)\n # point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(0, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n point, point_label = select_points(\n train_labels[sample_idx],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n iter_points_ax.append(point)\n iter_point_labels.append(point_label)\n # batched prompt\n iter_points_ax = torch.stack(iter_points_ax, dim=0).cuda()\n iter_point_labels = torch.stack(iter_point_labels, dim=0).cuda()\n iter_points = (iter_points_ax, iter_point_labels)\n iter_bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return iter_points, iter_bboxes, iter_organs\n \n def build_pseudo_point_prompt_label(self, input_shape, seg_labels):\n pseudo_labels = torch.zeros(input_shape).cuda()\n # generate points\n points = []\n point_labels = []\n for batch_idx in range(input_shape[0]):\n # generate pseudo label\n unique_ids = torch.unique(seg_labels[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels[batch_idx]==region_id] = 1\n # generate point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(4, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n assert len(pseudo_labels[batch_idx][0].shape) == 3\n point, point_label = select_points(\n pseudo_labels[batch_idx][0],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n points.append(point)\n point_labels.append(point_label)\n points = torch.stack(points, dim=0).cuda()\n point_labels = torch.stack(point_labels, dim=0).cuda()\n pseudo_points_prompt = (points, point_labels)\n return pseudo_labels, pseudo_points_prompt\n\n def build_pseudo_box_prompt_label(self, input_shape, seg_labels_cleaned):\n pseudo_labels = torch.zeros(input_shape).cuda()\n iter_bboxes = []\n # generate boxes\n for batch_idx in range(input_shape[0]):\n # generate ori pseudo label\n unique_ids = torch.unique(seg_labels_cleaned[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==region_id] = 1\n # generate box prompt\n box = generate_box(pseudo_labels[batch_idx][0])\n iter_bboxes.append(box)\n # refine pseudo label\n x_min, y_min, z_min, x_max, y_max, z_max = box\n binary_cube = torch.zeros_like(pseudo_labels[batch_idx][0]).int()\n binary_cube[x_min:x_max+1, y_min:y_max+1, z_min:z_max+1] = 1\n # cal iou\n mask_label = 
seg_labels_cleaned[batch_idx][0]\n assert binary_cube.shape == mask_label.shape, str(binary_cube.shape) + ' ' + str(mask_label.shape)\n mask_values_in_binary_cube = mask_label[binary_cube == 1]\n unique_mask_values = torch.unique(mask_values_in_binary_cube)\n # print('unique_mask_values ', unique_mask_values)\n for value in unique_mask_values:\n if value == -1: continue\n mask_area = (mask_label == value)\n intersection = (binary_cube & mask_area)\n iou = intersection.float().sum() / mask_area.float().sum()\n if iou > 0.90:\n # print(f\"Mask value {value} has IOU > 0.90 in binary cube.\")\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==value] = 1\n\n bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return pseudo_labels, bboxes"
},
{
"identifier": "process_ct_gt",
"path": "data_process/demo_data_process.py",
"snippet": "def process_ct_gt(case_path, label_path, category, spatial_size):\n print('Data preprocessing...')\n # transform\n img_loader = transforms.LoadImage()\n transform = transforms.Compose(\n [\n transforms.Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n ForegroundNormalization(keys=[\"image\"]),\n DimTranspose(keys=[\"image\", \"label\"]),\n MinMaxNormalization(),\n transforms.SpatialPadd(keys=[\"image\", \"label\"], spatial_size=spatial_size, mode='constant'),\n transforms.CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n transforms.ToTensord(keys=[\"image\", \"label\"]),\n ]\n )\n zoom_out_transform = transforms.Resized(keys=[\"image\", \"label\"], spatial_size=spatial_size, mode='nearest-exact')\n\n ###\n item = {}\n # generate ct_voxel_ndarray\n ct_voxel_ndarray, _ = img_loader(case_path)\n print(type(ct_voxel_ndarray))\n ct_voxel_ndarray = np.array(ct_voxel_ndarray).squeeze()\n ct_shape = ct_voxel_ndarray.shape\n ct_voxel_ndarray = np.expand_dims(ct_voxel_ndarray, axis=0)\n item['image'] = ct_voxel_ndarray\n\n # generate gt_voxel_ndarray\n gt_voxel_ndarray, _ = img_loader(label_path)\n gt_voxel_ndarray = np.array(gt_voxel_ndarray)\n present_categories = np.unique(gt_voxel_ndarray)\n gt_masks = []\n for cls_idx in range(len(category)):\n # ignore background\n cls = cls_idx + 1\n if cls not in present_categories:\n gt_voxel_ndarray_category = np.zeros(ct_shape)\n gt_masks.append(gt_voxel_ndarray_category)\n else:\n gt_voxel_ndarray_category = gt_voxel_ndarray.copy()\n gt_voxel_ndarray_category[gt_voxel_ndarray != cls] = 0\n gt_voxel_ndarray_category[gt_voxel_ndarray == cls] = 1\n gt_masks.append(gt_voxel_ndarray_category)\n gt_voxel_ndarray = np.stack(gt_masks, axis=0)\n assert gt_voxel_ndarray.shape[0] == len(category) and gt_voxel_ndarray.shape[1:] == ct_voxel_ndarray.shape[1:]\n item['label'] = gt_voxel_ndarray.astype(np.int32)\n\n # transform\n item = transform(item)\n item_zoom_out = zoom_out_transform(item)\n item['zoom_out_image'] = item_zoom_out['image']\n item['zoom_out_label'] = item_zoom_out['label']\n print( 'Zoom_in image shape: ', item['image'].shape, \n '\\nZoom_in label shape: ', item['label'].shape,\n '\\nZoom_out image shape: ', item['zoom_out_image'].shape,\n '\\nZoom_out label shape: ', item['zoom_out_label'].shape,\n )\n return item"
},
{
"identifier": "sliding_window_inference",
"path": "utils/monai_inferers_utils.py",
"snippet": "def sliding_window_inference(\n inputs: torch.Tensor,\n prompt_reflection: Union[torch.Tensor, Tuple[torch.Tensor, ...]],\n roi_size: Union[Sequence[int], int],\n sw_batch_size: int,\n predictor: Callable[..., Union[torch.Tensor, Sequence[torch.Tensor], Dict[Any, torch.Tensor]]],\n overlap: float = 0.25,\n mode: Union[BlendMode, str] = BlendMode.CONSTANT,\n sigma_scale: Union[Sequence[float], float] = 0.125,\n padding_mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,\n cval: float = 0.0,\n sw_device: Union[torch.device, str, None] = None,\n device: Union[torch.device, str, None] = None,\n progress: bool = False,\n roi_weight_map: Union[torch.Tensor, None] = None,\n *args: Any,\n **kwargs: Any,\n) -> Union[torch.Tensor, Tuple[torch.Tensor, ...], Dict[Any, torch.Tensor]]:\n \"\"\"\n Sliding window inference on `inputs` with `predictor`.\n\n The outputs of `predictor` could be a tensor, a tuple, or a dictionary of tensors.\n Each output in the tuple or dict value is allowed to have different resolutions with respect to the input.\n e.g., the input patch spatial size is [128,128,128], the output (a tuple of two patches) patch sizes\n could be ([128,64,256], [64,32,128]).\n In this case, the parameter `overlap` and `roi_size` need to be carefully chosen to ensure the output ROI is still\n an integer. If the predictor's input and output spatial sizes are not equal, we recommend choosing the parameters\n so that `overlap*roi_size*output_size/input_size` is an integer (for each spatial dimension).\n\n When roi_size is larger than the inputs' spatial size, the input image are padded during inference.\n To maintain the same spatial sizes, the output image will be cropped to the original input size.\n\n Args:\n inputs: input image to be processed (assuming NCHW[D])\n roi_size: the spatial window size for inferences.\n When its components have None or non-positives, the corresponding inputs dimension will be used.\n if the components of the `roi_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n sw_batch_size: the batch size to run window slices.\n predictor: given input tensor ``patch_data`` in shape NCHW[D],\n The outputs of the function call ``predictor(patch_data)`` should be a tensor, a tuple, or a dictionary\n with Tensor values. Each output in the tuple or dict value should have the same batch_size, i.e. NM'H'W'[D'];\n where H'W'[D'] represents the output patch's spatial size, M is the number of output channels,\n N is `sw_batch_size`, e.g., the input shape is (7, 1, 128,128,128),\n the output could be a tuple of two tensors, with shapes: ((7, 5, 128, 64, 256), (7, 4, 64, 32, 128)).\n In this case, the parameter `overlap` and `roi_size` need to be carefully chosen\n to ensure the scaled output ROI sizes are still integers.\n If the `predictor`'s input and output spatial sizes are different,\n we recommend choosing the parameters so that ``overlap*roi_size*zoom_scale`` is an integer for each dimension.\n overlap: Amount of overlap between scans.\n mode: {``\"constant\"``, ``\"gaussian\"``}\n How to blend output of overlapping windows. 
Defaults to ``\"constant\"``.\n\n - ``\"constant``\": gives equal weight to all predictions.\n - ``\"gaussian``\": gives less weight to predictions on edges of windows.\n\n sigma_scale: the standard deviation coefficient of the Gaussian window when `mode` is ``\"gaussian\"``.\n Default: 0.125. Actual window sigma is ``sigma_scale`` * ``dim_size``.\n When sigma_scale is a sequence of floats, the values denote sigma_scale at the corresponding\n spatial dimensions.\n padding_mode: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}\n Padding mode for ``inputs``, when ``roi_size`` is larger than inputs. Defaults to ``\"constant\"``\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n cval: fill value for 'constant' padding mode. Default: 0\n sw_device: device for the window data.\n By default the device (and accordingly the memory) of the `inputs` is used.\n Normally `sw_device` should be consistent with the device where `predictor` is defined.\n device: device for the stitched output prediction.\n By default the device (and accordingly the memory) of the `inputs` is used. If for example\n set to device=torch.device('cpu') the gpu memory consumption is less and independent of the\n `inputs` and `roi_size`. Output is on the `device`.\n progress: whether to print a `tqdm` progress bar.\n roi_weight_map: pre-computed (non-negative) weight map for each ROI.\n If not given, and ``mode`` is not `constant`, this map will be computed on the fly.\n args: optional args to be passed to ``predictor``.\n kwargs: optional keyword args to be passed to ``predictor``.\n\n Note:\n - input must be channel-first and have a batch dim, supports N-D sliding window.\n\n \"\"\"\n print('sliding window inference for ROI')\n text = kwargs['text']\n use_box = kwargs['use_box']\n use_point = kwargs['use_point']\n assert not (use_box and use_point)\n compute_dtype = inputs.dtype\n num_spatial_dims = len(inputs.shape) - 2\n if overlap < 0 or overlap >= 1:\n raise ValueError(\"overlap must be >= 0 and < 1.\")\n\n # determine image spatial size and batch size\n # Note: all input images must have the same image size and batch size\n batch_size, _, *image_size_ = inputs.shape\n\n if device is None:\n device = inputs.device\n if sw_device is None:\n sw_device = inputs.device\n\n roi_size = fall_back_tuple(roi_size, image_size_)\n # in case that image size is smaller than roi size\n image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims))\n pad_size = []\n for k in range(len(inputs.shape) - 1, 1, -1):\n diff = max(roi_size[k - 2] - inputs.shape[k], 0)\n half = diff // 2\n pad_size.extend([half, diff - half])\n inputs = F.pad(inputs, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode).value, value=cval)\n #############\n if use_point or use_box:\n binary_prompt_map, global_preds = prompt_reflection\n global_preds = F.pad(global_preds, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode).value, value=cval)\n #############\n scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap)\n\n # Store all slices in list\n slices = dense_patch_slices(image_size, roi_size, scan_interval)\n num_win = len(slices) # number of windows per image\n total_slices = num_win * batch_size # total number of windows\n\n # Create window-level importance map\n valid_patch_size = get_valid_patch_size(image_size, roi_size)\n if valid_patch_size == roi_size and (roi_weight_map is not None):\n importance_map = roi_weight_map\n 
else:\n try:\n importance_map = compute_importance_map(valid_patch_size, mode=mode, sigma_scale=sigma_scale, device=device)\n except BaseException as e:\n raise RuntimeError(\n \"Seems to be OOM. Please try smaller patch size or mode='constant' instead of mode='gaussian'.\"\n ) from e\n importance_map = convert_data_type(importance_map, torch.Tensor, device, compute_dtype)[0] # type: ignore\n # handle non-positive weights\n min_non_zero = max(importance_map[importance_map != 0].min().item(), 1e-3)\n importance_map = torch.clamp(importance_map.to(torch.float32), min=min_non_zero).to(compute_dtype)\n\n # Perform predictions\n dict_key, output_image_list, count_map_list = None, [], []\n _initialized_ss = -1\n is_tensor_output = True # whether the predictor's output is a tensor (instead of dict/tuple)\n\n # for each patch\n for slice_g in tqdm(range(0, total_slices, sw_batch_size)) if progress else range(0, total_slices, sw_batch_size):\n slice_range = range(slice_g, min(slice_g + sw_batch_size, total_slices))\n unravel_slice = [\n [slice(int(idx / num_win), int(idx / num_win) + 1), slice(None)] + list(slices[idx % num_win])\n for idx in slice_range\n ]\n window_data = torch.cat([inputs[win_slice] for win_slice in unravel_slice]).to(sw_device)\n #############\n \n boxes = None\n points = None\n if use_point:\n window_binary_prompt_map = torch.cat([binary_prompt_map[win_slice] for win_slice in unravel_slice]).to(sw_device)\n point, point_label = select_points(window_binary_prompt_map.squeeze())\n points = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) \n pseudo_label = torch.cat([global_preds[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(pseudo_label.squeeze()).unsqueeze(0).float().cuda()\n if use_box:\n if num_win == 1:\n window_binary_prompt_map = torch.cat([binary_prompt_map[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(window_binary_prompt_map.squeeze()).unsqueeze(0).float().cuda()\n else:\n pseudo_label = torch.cat([global_preds[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(pseudo_label.squeeze()).unsqueeze(0).float().cuda()\n seg_prob_out = predictor(window_data, text, boxes, points) # batched patch segmentation\n #############\n # convert seg_prob_out to tuple seg_prob_tuple, this does not allocate new memory.\n seg_prob_tuple: Tuple[torch.Tensor, ...]\n if isinstance(seg_prob_out, torch.Tensor):\n seg_prob_tuple = (seg_prob_out,)\n elif isinstance(seg_prob_out, Mapping):\n if dict_key is None:\n dict_key = sorted(seg_prob_out.keys()) # track predictor's output keys\n seg_prob_tuple = tuple(seg_prob_out[k] for k in dict_key)\n is_tensor_output = False\n else:\n seg_prob_tuple = ensure_tuple(seg_prob_out)\n is_tensor_output = False\n\n # for each output in multi-output list\n for ss, seg_prob in enumerate(seg_prob_tuple):\n seg_prob = seg_prob.to(device) # BxCxMxNxP or BxCxMxN\n\n # compute zoom scale: out_roi_size/in_roi_size\n zoom_scale = []\n for axis, (img_s_i, out_w_i, in_w_i) in enumerate(\n zip(image_size, seg_prob.shape[2:], window_data.shape[2:])\n ):\n _scale = out_w_i / float(in_w_i)\n if not (img_s_i * _scale).is_integer():\n warnings.warn(\n f\"For spatial axis: {axis}, output[{ss}] will have non-integer shape. Spatial \"\n f\"zoom_scale between output[{ss}] and input is {_scale}. Please pad inputs.\"\n )\n zoom_scale.append(_scale)\n\n if _initialized_ss < ss: # init. 
the ss-th buffer at the first iteration\n # construct multi-resolution outputs\n output_classes = seg_prob.shape[1]\n output_shape = [batch_size, output_classes] + [\n int(image_size_d * zoom_scale_d) for image_size_d, zoom_scale_d in zip(image_size, zoom_scale)\n ]\n # allocate memory to store the full output and the count for overlapping parts\n output_image_list.append(torch.zeros(output_shape, dtype=compute_dtype, device=device))\n count_map_list.append(torch.zeros([1, 1] + output_shape[2:], dtype=compute_dtype, device=device))\n _initialized_ss += 1\n\n # resizing the importance_map\n resizer = Resize(spatial_size=seg_prob.shape[2:], mode=\"nearest\", anti_aliasing=False)\n\n # store the result in the proper location of the full output. Apply weights from importance map.\n for idx, original_idx in zip(slice_range, unravel_slice):\n # zoom roi\n original_idx_zoom = list(original_idx) # 4D for 2D image, 5D for 3D image\n for axis in range(2, len(original_idx_zoom)):\n zoomed_start = original_idx[axis].start * zoom_scale[axis - 2]\n zoomed_end = original_idx[axis].stop * zoom_scale[axis - 2]\n if not zoomed_start.is_integer() or (not zoomed_end.is_integer()):\n warnings.warn(\n f\"For axis-{axis-2} of output[{ss}], the output roi range is not int. \"\n f\"Input roi range is ({original_idx[axis].start}, {original_idx[axis].stop}). \"\n f\"Spatial zoom_scale between output[{ss}] and input is {zoom_scale[axis - 2]}. \"\n f\"Corresponding output roi range is ({zoomed_start}, {zoomed_end}).\\n\"\n f\"Please change overlap ({overlap}) or roi_size ({roi_size[axis-2]}) for axis-{axis-2}. \"\n \"Tips: if overlap*roi_size*zoom_scale is an integer, it usually works.\"\n )\n original_idx_zoom[axis] = slice(int(zoomed_start), int(zoomed_end), None)\n importance_map_zoom = resizer(importance_map.unsqueeze(0))[0].to(compute_dtype)\n # store results and weights\n output_image_list[ss][original_idx_zoom] += importance_map_zoom * seg_prob[idx - slice_g]\n count_map_list[ss][original_idx_zoom] += (\n importance_map_zoom.unsqueeze(0).unsqueeze(0).expand(count_map_list[ss][original_idx_zoom].shape)\n )\n\n # account for any overlapping sections\n for ss in range(len(output_image_list)):\n output_image_list[ss] = (output_image_list[ss] / count_map_list.pop(0)).to(compute_dtype)\n\n # remove padding if image_size smaller than roi_size\n for ss, output_i in enumerate(output_image_list):\n if torch.isnan(output_i).any() or torch.isinf(output_i).any():\n warnings.warn(\"Sliding window inference results contain NaN or Inf.\")\n\n zoom_scale = [\n seg_prob_map_shape_d / roi_size_d for seg_prob_map_shape_d, roi_size_d in zip(output_i.shape[2:], roi_size)\n ]\n\n final_slicing: List[slice] = []\n for sp in range(num_spatial_dims):\n slice_dim = slice(pad_size[sp * 2], image_size_[num_spatial_dims - sp - 1] + pad_size[sp * 2])\n slice_dim = slice(\n int(round(slice_dim.start * zoom_scale[num_spatial_dims - sp - 1])),\n int(round(slice_dim.stop * zoom_scale[num_spatial_dims - sp - 1])),\n )\n final_slicing.insert(0, slice_dim)\n while len(final_slicing) < len(output_i.shape):\n final_slicing.insert(0, slice(None))\n output_image_list[ss] = output_i[final_slicing]\n\n if dict_key is not None: # if output of predictor is a dict\n final_output = dict(zip(dict_key, output_image_list))\n else:\n final_output = tuple(output_image_list) # type: ignore\n return final_output[0] if is_tensor_output else final_output # type: ignore"
},
{
"identifier": "generate_box",
"path": "utils/monai_inferers_utils.py",
"snippet": "def generate_box(pred_pre, bbox_shift=None):\n meaning_post_label = pred_pre # [h, w, d]\n ones_idx = (meaning_post_label > 0).nonzero(as_tuple=True)\n if all(tensor.nelement() == 0 for tensor in ones_idx):\n bboxes = torch.tensor([-1,-1,-1,-1,-1,-1])\n # print(bboxes, bboxes.shape)\n return bboxes\n min_coords = [dim.min() for dim in ones_idx] # [x_min, y_min, z_min]\n max_coords = [dim.max() for dim in ones_idx] # [x_max, y_max, z_max]\n\n\n if bbox_shift is None:\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor)\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor)\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)\n else:\n # add perturbation to bounding box coordinates\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor + random.randint(-bbox_shift, bbox_shift))\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor + random.randint(-bbox_shift, bbox_shift))\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)"
},
{
"identifier": "select_points",
"path": "utils/monai_inferers_utils.py",
"snippet": "def select_points(preds, num_positive_extra=4, num_negative_extra=0, fix_extra_point_num=None):\n spacial_dim = 3\n points = torch.zeros((0, 3))\n labels = torch.zeros((0))\n pos_thred = 0.9\n neg_thred = 0.1\n \n # get pos/net indices\n positive_indices = torch.nonzero(preds > pos_thred, as_tuple=True) # ([pos x], [pos y], [pos z])\n negative_indices = torch.nonzero(preds < neg_thred, as_tuple=True)\n\n ones_idx = (preds > pos_thred).nonzero(as_tuple=True)\n if all(tmp.nelement() == 0 for tmp in ones_idx):\n # all neg\n num_positive_extra = 0\n selected_positive_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n else:\n # random select a pos point\n random_idx = torch.randint(len(positive_indices[0]), (1,))\n selected_positive_point = torch.tensor([positive_indices[i][random_idx] for i in range(spacial_dim)]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.ones((1))))\n\n if num_positive_extra > 0:\n pos_idx_list = torch.randperm(len(positive_indices[0]))[:num_positive_extra]\n extra_positive_points = []\n for pos_idx in pos_idx_list:\n extra_positive_points.append([positive_indices[i][pos_idx] for i in range(spacial_dim)])\n extra_positive_points = torch.tensor(extra_positive_points).reshape(-1, 3)\n points = torch.cat((points, extra_positive_points), dim=0)\n labels = torch.cat((labels, torch.ones((extra_positive_points.shape[0]))))\n\n if num_negative_extra > 0:\n neg_idx_list = torch.randperm(len(negative_indices[0]))[:num_negative_extra]\n extra_negative_points = []\n for neg_idx in neg_idx_list:\n extra_negative_points.append([negative_indices[i][neg_idx] for i in range(spacial_dim)])\n extra_negative_points = torch.tensor(extra_negative_points).reshape(-1, 3)\n points = torch.cat((points, extra_negative_points), dim=0)\n labels = torch.cat((labels, torch.zeros((extra_negative_points.shape[0]))))\n # print('extra_negative_points ', extra_negative_points, extra_negative_points.shape)\n # print('==> points ', points.shape, labels)\n \n if fix_extra_point_num is None:\n left_point_num = num_positive_extra + num_negative_extra + 1 - labels.shape[0]\n else:\n left_point_num = fix_extra_point_num + 1 - labels.shape[0]\n\n for _ in range(left_point_num):\n ignore_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, ignore_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n\n return (points, labels)"
},
{
"identifier": "build_binary_cube",
"path": "utils/monai_inferers_utils.py",
"snippet": "def build_binary_cube(bbox, binary_cube_shape):\n min_coord = bbox[0][:3].int().tolist()\n max_coord = bbox[0][3:].int().tolist()\n binary_cube = torch.zeros(binary_cube_shape)\n binary_cube[min_coord[0]:max_coord[0]+1, min_coord[1]:max_coord[1]+1, min_coord[2]:max_coord[2]+1] = 1\n return binary_cube"
},
{
"identifier": "build_binary_points",
"path": "utils/monai_inferers_utils.py",
"snippet": "def build_binary_points(points, labels, shape):\n binary_points = torch.zeros(shape, dtype=torch.int16)\n binary_points[points[labels == 1, 0].long(), points[labels == 1, 1].long(), points[labels == 1, 2].long()] = 1\n return binary_points"
},
{
"identifier": "logits2roi_coor",
"path": "utils/monai_inferers_utils.py",
"snippet": "def logits2roi_coor(spatial_size, logits_global_single):\n # crop predict\n pred_global_single = torch.sigmoid(logits_global_single) > 0.5\n ## get all pos idx\n nonzero_indices = torch.nonzero(pred_global_single)\n if nonzero_indices.shape[0] == 0:\n return None, None, None, None, None, None\n ## get boundary\n min_d, max_d = nonzero_indices[:, 0].min(), nonzero_indices[:, 0].max()\n min_h, max_h = nonzero_indices[:, 1].min(), nonzero_indices[:, 1].max()\n min_w, max_w = nonzero_indices[:, 2].min(), nonzero_indices[:, 2].max()\n ## padding\n crop_d, crop_h, crop_w = max_d - min_d + 1, max_h - min_h + 1, max_w - min_w + 1,\n window_d, window_h, window_w = spatial_size\n padding_d, padding_h, padding_w = max(0, window_d-crop_d), max(0, window_h-crop_h), max(0, window_w-crop_w)\n global_d, global_h, global_w = logits_global_single.shape\n min_d = max(0, min_d - int(padding_d)//2)\n min_h = max(0, min_h - int(padding_h)//2)\n min_w = max(0, min_w - int(padding_w)//2)\n max_d = min(global_d, max_d + int(padding_d)//2)\n max_h = min(global_h, max_h + int(padding_h)//2)\n max_w = min(global_w, max_w + int(padding_w)//2)\n return min_d, min_h, min_w, max_d, max_h, max_w"
},
{
"identifier": "draw_result",
"path": "utils/visualize.py",
"snippet": "def draw_result(category, image, bboxes, points, logits, gt3D, spatial_size, work_dir):\n zoom_out_transform = transforms.Compose([\n transforms.AddChanneld(keys=[\"image\", \"label\", \"logits\"]),\n transforms.Resized(keys=[\"image\", \"label\", \"logits\"], spatial_size=spatial_size, mode='nearest-exact')\n ])\n post_item = zoom_out_transform({\n 'image': image,\n 'label': gt3D,\n 'logits': logits\n })\n image, gt3D, logits = post_item['image'][0], post_item['label'][0], post_item['logits'][0]\n preds = torch.sigmoid(logits)\n preds = (preds > 0.5).int()\n\n root_dir=os.path.join(work_dir, f'fig_examples/{category}/') \n\n if not os.path.exists(root_dir):\n os.makedirs(root_dir)\n if bboxes is not None:\n x1, y1, z1, x2, y2, z2 = bboxes[0].cpu().numpy()\n if points is not None:\n points = (points[0].cpu().numpy(), points[1].cpu().numpy())\n points_ax = points[0][0] # [n, 3]\n points_label = points[1][0] # [n]\n\n for j in range(image.shape[0]):\n img_2d = image[j, :, :].detach().cpu().numpy()\n preds_2d = preds[j, :, :].detach().cpu().numpy()\n label_2d = gt3D[j, :, :].detach().cpu().numpy()\n if np.sum(label_2d) == 0 or np.sum(preds_2d) == 0:\n continue\n\n img_2d = img_2d * 255\n # orginal img\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n ax1.imshow(img_2d, cmap='gray')\n ax1.set_title('Image with prompt') \n ax1.axis('off')\n\n # gt\n ax2.imshow(img_2d, cmap='gray')\n show_mask(label_2d, ax2)\n ax2.set_title('Ground truth') \n ax2.axis('off')\n\n # preds\n ax3.imshow(img_2d, cmap='gray')\n show_mask(preds_2d, ax3)\n ax3.set_title('Prediction') \n ax3.axis('off')\n\n # boxes\n if bboxes is not None:\n if j >= x1 and j <= x2:\n show_box((z1, y1, z2, y2), ax1)\n # points\n if points is not None:\n for point_idx in range(points_label.shape[0]):\n point = points_ax[point_idx]\n label = points_label[point_idx] # [1]\n if j == point[0]:\n show_points(point, label, ax1)\n \n fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)\n plt.savefig(os.path.join(root_dir, f'{category}_{j}.png'), bbox_inches='tight')\n plt.close()"
}
] | import argparse
import os
import torch
import torch.nn.functional as F
import json
import monai.transforms as transforms
from segment_anything_volumetric import sam_model_registry
from network.model import SegVol
from data_process.demo_data_process import process_ct_gt
from utils.monai_inferers_utils import sliding_window_inference, generate_box, select_points, build_binary_cube, build_binary_points, logits2roi_coor
from utils.visualize import draw_result | 10,330 |
def set_parse():
# %% set up parser
parser = argparse.ArgumentParser()
parser.add_argument("--test_mode", default=True, type=bool)
parser.add_argument("--resume", type = str, default = '')
parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap")
parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple)
parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple)
parser.add_argument('-work_dir', type=str, default='./work_dir')
### demo
parser.add_argument('--demo_config', type=str, required=True)
parser.add_argument("--clip_ckpt", type = str, default = './config/clip')
args = parser.parse_args()
return args
def dice_score(preds, labels): # on GPU
assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape)
predict = preds.view(1, -1)
target = labels.view(1, -1)
if target.shape[1] < 1e8:
predict = predict.cuda()
target = target.cuda()
predict = torch.sigmoid(predict)
predict = torch.where(predict > 0.5, 1., 0.)
tp = torch.sum(torch.mul(predict, target))
den = torch.sum(predict) + torch.sum(target) + 1
dice = 2 * tp / den
if target.shape[1] < 1e8:
predict = predict.cpu()
target = target.cpu()
return dice
def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None):
logits_labels_record = {}
image_single_resize = image_resize
image_single = image[0,0]
ori_shape = image_single.shape
for item_idx in range(len(categories)):
# get label to generate prompts
label_single = gt3D[0][item_idx]
label_single_resize = gt3D_resize[0][item_idx]
# skip meaningless categories
if torch.sum(label_single) == 0:
print('No object, skip')
continue
# generate prompts
text_single = categories[item_idx] if args.use_text_prompt else None
if categories is not None: print(f'inference |{categories[item_idx]}| target...')
points_single = None
box_single = None
if args.use_point_prompt:
point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3)
points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda())
binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape)
if args.use_box_prompt:
box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda()
|
def set_parse():
# %% set up parser
parser = argparse.ArgumentParser()
parser.add_argument("--test_mode", default=True, type=bool)
parser.add_argument("--resume", type = str, default = '')
parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap")
parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple)
parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple)
parser.add_argument('-work_dir', type=str, default='./work_dir')
### demo
parser.add_argument('--demo_config', type=str, required=True)
parser.add_argument("--clip_ckpt", type = str, default = './config/clip')
args = parser.parse_args()
return args
def dice_score(preds, labels): # on GPU
assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape)
predict = preds.view(1, -1)
target = labels.view(1, -1)
if target.shape[1] < 1e8:
predict = predict.cuda()
target = target.cuda()
predict = torch.sigmoid(predict)
predict = torch.where(predict > 0.5, 1., 0.)
tp = torch.sum(torch.mul(predict, target))
den = torch.sum(predict) + torch.sum(target) + 1
dice = 2 * tp / den
if target.shape[1] < 1e8:
predict = predict.cpu()
target = target.cpu()
return dice
def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None):
logits_labels_record = {}
image_single_resize = image_resize
image_single = image[0,0]
ori_shape = image_single.shape
for item_idx in range(len(categories)):
# get label to generate prompts
label_single = gt3D[0][item_idx]
label_single_resize = gt3D_resize[0][item_idx]
# skip meaningless categories
if torch.sum(label_single) == 0:
print('No object, skip')
continue
# generate prompts
text_single = categories[item_idx] if args.use_text_prompt else None
if categories is not None: print(f'inference |{categories[item_idx]}| target...')
points_single = None
box_single = None
if args.use_point_prompt:
point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3)
points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda())
binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape)
if args.use_box_prompt:
box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() | binary_cube_resize = build_binary_cube(box_single, binary_cube_shape=label_single_resize.shape) | 6 | 2023-11-10 08:25:37+00:00 | 12k |
xk-huang/segment-caption-anything | tests/models/sca/test_modeling_sca.py | [
{
"identifier": "ScaConfig",
"path": "src/models/sca/configuration_sca.py",
"snippet": "class ScaConfig(PretrainedConfig):\n model_type = \"sca\"\n is_composition = True\n\n def __init__(\n self,\n vision_config=None,\n prompt_encoder_config=None,\n mask_caption_decoder_config=None,\n text_config=None,\n initializer_range=0.02,\n # NOTE: for recoginition pretrain\n num_task_tokens: int = 6,\n **kwargs,\n ):\n super().__init__(**kwargs)\n vision_config = vision_config if vision_config is not None else {}\n prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}\n mask_caption_decoder_config = mask_caption_decoder_config if mask_caption_decoder_config is not None else {}\n text_config = text_config if text_config is not None else {}\n\n if isinstance(vision_config, SamVisionConfig):\n self.vision = vision_config.to_dict()\n if isinstance(prompt_encoder_config, SamPromptEncoderConfig):\n self.prompt_encoder = prompt_encoder_config.to_dict()\n if isinstance(mask_caption_decoder_config, ScaMaskCaptionDecoderConfig):\n self.mask_caption_decoder_config = mask_caption_decoder_config.to_dict()\n\n text_model_type = text_config[\"model_type\"] if \"model_type\" in text_config else \"gpt2\"\n # NOTE(xiaoke): use_decoder_only_language_model only return the model class like GPT2, rather the task model class\n # like GPT2forCausalLM. We need the task model class to load the pretrained weights for the task.\n self.text_config = CONFIG_MAPPING[text_model_type](**text_config)\n\n self.tie_word_embeddings = self.text_config.tie_word_embeddings\n self.is_encoder_decoder = self.text_config.is_encoder_decoder\n self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES\n\n self.vision_config = SamVisionConfig(**vision_config)\n self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config)\n self.mask_caption_decoder_config = ScaMaskCaptionDecoderConfig(**mask_caption_decoder_config)\n self.initializer_range = initializer_range\n\n self.num_task_tokens = num_task_tokens\n\n def to_dict(self):\n output = copy.deepcopy(self.__dict__)\n output[\"vision_config\"] = self.vision_config.to_dict()\n output[\"prompt_encoder_config\"] = self.prompt_encoder_config.to_dict()\n output[\"mask_caption_decoder_config\"] = self.mask_caption_decoder_config.to_dict()\n output[\"text_config\"] = self.text_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output\n\n @classmethod\n def from_sam_text_configs(\n cls,\n sam_config: SamConfig,\n text_config: Optional[PretrainedConfig] = None,\n additional_num_hidden_layers: Optional[int] = None,\n num_caption_tokens: Optional[int] = None,\n num_task_tokens: Optional[int] = None,\n num_caption_heads: Optional[int] = None,\n vl_projector_type: Optional[str] = None,\n vl_projector_norm_type: Optional[str] = None,\n **kwargs,\n ):\n if additional_num_hidden_layers is None:\n logger.warning(\"additional_num_hidden_layers is not set, using default value: 2. Make sure it is correct!\")\n additional_num_hidden_layers = 2\n if num_caption_tokens is None:\n logger.warning(\"num_caption_tokens is not set, using default value: 1. Make sure it is correct!\")\n num_caption_tokens: int = 1\n if num_task_tokens is None:\n logger.warning(\"num_task_tokens is not set, using default value: 6. Make sure it is correct!\")\n num_task_tokens = 6\n if num_caption_heads is None:\n logger.warning(\"num_caption_heads is not set, using default value: 1. 
Make sure it is correct!\")\n num_caption_heads = 1\n if vl_projector_type is None:\n logger.warning(\"vl_projector_type is not set, using default value: linear. Make sure it is correct!\")\n vl_projector_type = \"linear\"\n if vl_projector_norm_type is None:\n logger.warning(\"vl_projector_norm_type is not set, using default value: none. Make sure it is correct!\")\n vl_projector_norm_type = \"none\"\n\n return cls(\n vision_config=sam_config.vision_config.to_dict(),\n prompt_encoder_config=sam_config.prompt_encoder_config.to_dict(),\n mask_caption_decoder_config={\n **sam_config.mask_decoder_config.to_dict(),\n \"additional_num_hidden_layers\": additional_num_hidden_layers,\n \"num_caption_tokens\": num_caption_tokens,\n \"num_caption_heads\": num_caption_heads,\n },\n text_config=text_config.to_dict() if text_config is not None else None,\n num_task_tokens=num_task_tokens,\n vl_projector_type=vl_projector_type,\n vl_projector_norm_type=vl_projector_norm_type,\n **kwargs,\n )"
},
{
"identifier": "ScaProcessor",
"path": "src/models/sca/processing_sca.py",
"snippet": "class ScaProcessor(ProcessorMixin):\n attributes = [\"tokenizer\"]\n tokenizer_class = \"AutoTokenizer\"\n\n def __init__(self, sam_processor, tokenizer):\n super().__init__(tokenizer)\n self.sam_processor: SamProcessor = sam_processor\n\n def __call__(\n self,\n # from ../sam/processing_sam.py\n images=None,\n input_points=None,\n input_labels=None,\n input_boxes=None,\n original_sizes=None,\n # from transformers.models.blip.processing_blip.py\n text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,\n add_special_tokens: bool = True,\n padding: Union[bool, str, PaddingStrategy] = False,\n truncation: Union[bool, str, TruncationStrategy] = None,\n max_length: Optional[int] = None,\n stride: int = 0,\n pad_to_multiple_of: Optional[int] = None,\n return_attention_mask: Optional[bool] = None,\n return_overflowing_tokens: bool = False,\n return_special_tokens_mask: bool = False,\n return_offsets_mapping: bool = False,\n return_token_type_ids: bool = False,\n return_length: bool = False,\n verbose: bool = True,\n return_tensors=None,\n **kwargs,\n ):\n if images is None and original_sizes is None:\n raise ValueError(f\"images and original_sizes cannot both be None.\")\n\n if images is not None:\n input_encoding = self.sam_processor(\n images=images,\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n return_tensors=return_tensors,\n **kwargs,\n )\n images = make_list_of_images(images)\n input_encoding[\"images\"] = make_list_of_images(images)\n else:\n input_encoding = self.sam_processor.process_prompts(\n original_sizes=original_sizes,\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n return_tensors=return_tensors,\n )\n\n if text is not None:\n text_encoding = self.tokenizer(\n text=text,\n add_special_tokens=add_special_tokens,\n padding=padding,\n truncation=truncation,\n max_length=max_length,\n stride=stride,\n pad_to_multiple_of=pad_to_multiple_of,\n return_attention_mask=return_attention_mask,\n return_overflowing_tokens=return_overflowing_tokens,\n return_special_tokens_mask=return_special_tokens_mask,\n return_offsets_mapping=return_offsets_mapping,\n return_token_type_ids=return_token_type_ids,\n return_length=return_length,\n verbose=verbose,\n return_tensors=return_tensors,\n **kwargs,\n )\n else:\n text_encoding = {}\n input_encoding.update(text_encoding)\n\n return input_encoding\n\n def post_process_masks(self, *args, **kwargs):\n return self.sam_processor.post_process_masks(*args, **kwargs)\n\n @classmethod\n def from_sam_text_pretrained(cls, sam_pretrained_model_name_or_path, text_pretrained_model_name_or_path, **kwargs):\n sam_processor = SamProcessor.from_pretrained(sam_pretrained_model_name_or_path, **kwargs)\n # NOTE: To be compatible with OpenLLAMA which uses the slow tokenizer to avoid a bug.\n # Ref: https://github.com/openlm-research/open_llama#loading-the-weights-with-hugging-face-transformers\n if \"open_llama\" in text_pretrained_model_name_or_path:\n logger.warning(f\"Using slow tokenizer for {text_pretrained_model_name_or_path}.\")\n use_fast = False\n else:\n use_fast = True\n captioner_processor = AutoProcessor.from_pretrained(\n text_pretrained_model_name_or_path, use_fast=use_fast, **kwargs\n )\n return cls(sam_processor, captioner_processor)\n\n @property\n def model_input_names(self):\n tokenizer_input_names = self.tokenizer.model_input_names\n sam_processor_input_names = self.sam_processor.model_input_names\n return 
list(dict.fromkeys(tokenizer_input_names + sam_processor_input_names))"
},
{
"identifier": "ScaModel",
"path": "src/models/sca/modeling_sca.py",
"snippet": "class ScaModel(ScaPretrainedModel):\n _keys_to_ignore_on_load_missing = [r\"prompt_encoder.shared_embedding.positional_embedding\"]\n\n def __init__(self, config: ScaConfig, language_model: nn.Module = None):\n super().__init__(config)\n self.shared_image_embedding = SamPositionalEmbedding(config.vision_config)\n\n self.vision_encoder = SamVisionEncoder(config.vision_config)\n self.prompt_encoder = SamPromptEncoder(config.prompt_encoder_config, self.shared_image_embedding)\n # NOTE(xiaoke): Modified. We need to outputs one more tensor: `query_outputs` for captioning\n # Thus its real name is `mask_caption_decoder`, but we keep the name `mask_decoder` for loading SAM weights.\n self.mask_decoder = ScaMaskCaptionDecoder(config.mask_caption_decoder_config)\n\n self.language_project = nn.Linear(\n config.mask_caption_decoder_config.hidden_size, config.text_config.hidden_size\n )\n if language_model is None:\n if config.use_decoder_only_language_model:\n language_model = AutoModelForCausalLM.from_config(config.text_config)\n else:\n raise ValueError(\"Only decoder only language model is supported.\")\n self.language_model = language_model\n\n if config.text_config != self.language_model.config:\n text_config_dict = config.text_config.to_dict()\n language_model_config_dict = self.language_model.config.to_dict()\n all_keys = set(text_config_dict.keys()) | set(language_model_config_dict.keys())\n diff_kv = {}\n for k in all_keys:\n if k not in text_config_dict and k in language_model_config_dict:\n diff_kv[k] = (None, language_model_config_dict[k])\n elif k in text_config_dict and k not in language_model_config_dict:\n diff_kv[k] = (text_config_dict[k], None)\n else:\n if text_config_dict[k] != language_model_config_dict[k]:\n diff_kv[k] = (text_config_dict[k], language_model_config_dict[k])\n logger.warning(\n \"The text config is different from the original config and the language model config. 
The following keys have different \"\n \"values: {}\".format(diff_kv)\n )\n # NOTE: To support gradient checkpoint for LM: https://github.com/huggingface/transformers/pull/19990/files\n self.supports_gradient_checkpointing = True\n\n # Find generation config in language model\n def search_generation_config(obj, parent_key=\"base\"):\n generation_configs = []\n for attr in dir(obj):\n if attr.startswith(\"_\"):\n continue\n elif attr == \"generation_config\" and getattr(obj, attr) is not None:\n generation_configs.append((f\"{parent_key}-{attr}\", getattr(obj, attr)))\n elif isinstance(getattr(obj, attr), (nn.Module, PreTrainedModel)):\n # skip self reference to avoid infinite recursion\n if obj == getattr(obj, attr):\n continue\n generation_configs.extend(\n search_generation_config(getattr(obj, attr), parent_key=f\"{parent_key}-{attr}\")\n )\n return generation_configs\n\n generation_configs = search_generation_config(self.language_model, parent_key=\"captioner\")\n if len(generation_configs) != 1:\n logger.warning(f\"generation_configs: {generation_configs} has to be of length 1, we use the first one\")\n generation_config = generation_configs[0][1]\n if generation_config is not None:\n self.generation_config = generation_config\n logger.info(f\"generation_config: {generation_config} is used for `generate`\")\n\n self.config_parameters()\n self.post_init()\n\n # Copied from ..sam.modeling_sam.SamModel\n def get_input_embeddings(self):\n return self.vision_encoder.get_input_embeddings()\n\n def get_image_wide_positional_embeddings(self):\n size = self.config.prompt_encoder_config.image_embedding_size\n target_device = self.shared_image_embedding.positional_embedding.device\n target_dtype = self.shared_image_embedding.positional_embedding.dtype\n grid = torch.ones((size, size), device=target_device, dtype=target_dtype)\n y_embed = grid.cumsum(dim=0) - 0.5\n x_embed = grid.cumsum(dim=1) - 0.5\n y_embed = y_embed / size\n x_embed = x_embed / size\n\n positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1))\n return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width\n\n @torch.no_grad()\n def get_image_embeddings(\n self,\n pixel_values,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n r\"\"\"\n Returns the image embeddings by passing the pixel values through the vision encoder.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Input pixel values\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\n \"\"\"\n vision_output = self.vision_encoder(\n pixel_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n image_embeddings = vision_output[0]\n return image_embeddings\n\n @torch.no_grad()\n def get_prompt_embeddings(\n self,\n input_points: Optional[torch.FloatTensor] = None,\n input_labels: Optional[torch.LongTensor] = None,\n input_boxes: Optional[torch.FloatTensor] = None,\n input_masks: Optional[torch.LongTensor] = None,\n ):\n r\"\"\"\n Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt 
encoder.\n\n Args:\n input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):\n Optional input points for the prompt encoder. The padding of the point is automatically done by the\n processor. `point_batch_size` refers to the number of masks that we want the model to predict per\n point. The model will output `point_batch_size` times 3 masks in total.\n input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):\n Optional input labels for the prompt encoder. The padding of the labels is automatically done by the\n processor, or can be fed by the user.\n input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`):\n Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the\n processor. users can also pass manually the input boxes.\n input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`):\n Optional input masks for the prompt encoder.\n \"\"\"\n prompt_output = self.prompt_encoder(\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n input_masks=input_masks,\n )\n return prompt_output\n\n # NOTE(xiaoke). Modified from ..sam.modeling_sam.SamModel\n def forward(\n self,\n mode=\"train\",\n pixel_values: Optional[torch.FloatTensor] = None,\n input_points: Optional[torch.FloatTensor] = None,\n input_labels: Optional[torch.LongTensor] = None,\n input_boxes: Optional[torch.FloatTensor] = None,\n input_masks: Optional[torch.LongTensor] = None,\n image_embeddings: Optional[torch.FloatTensor] = None,\n multimask_output: bool = True,\n attention_similarity: Optional[torch.FloatTensor] = None,\n target_embedding: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict=None,\n # segmentation arguments\n mask_labels: Optional[torch.LongTensor] = None,\n # language model arguments\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n # legacy arguments for catching the inputs for sam captioner\n images=None,\n original_sizes=None,\n reshaped_input_sizes=None,\n **kwargs,\n ) -> List[Dict[str, torch.Tensor]]:\n r\"\"\"\n Example:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoModel, AutoProcessor\n\n >>> model = AutoModel.from_pretrained(\"facebook/sam-vit-base\")\n >>> processor = AutoProcessor.from_pretrained(\"facebook/sam-vit-base\")\n\n >>> img_url = \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png\"\n >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert(\"RGB\")\n >>> input_points = [[[400, 650]]] # 2D location of a window on the car\n >>> inputs = processor(images=raw_image, input_points=input_points, return_tensors=\"pt\")\n\n >>> # Get segmentation mask\n >>> outputs = model(**inputs)\n\n >>> # Postprocess masks\n >>> masks = processor.post_process_masks(\n ... outputs.pred_masks, inputs[\"original_sizes\"], inputs[\"reshaped_input_sizes\"]\n ... 
)\n ```\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if pixel_values is None and image_embeddings is None:\n raise ValueError(\"Either pixel_values or image_embeddings must be provided.\")\n\n if pixel_values is not None and image_embeddings is not None:\n raise ValueError(\"Only one of pixel_values and image_embeddings can be provided.\")\n\n if input_points is not None and len(input_points.shape) != 4:\n raise ValueError(\n \"The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.\",\n \" got {}.\".format(input_points.shape),\n )\n if input_boxes is not None and len(input_boxes.shape) != 3:\n raise ValueError(\n \"The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.\",\n \" got {}.\".format(input_boxes.shape),\n )\n if input_points is not None and input_boxes is not None:\n point_batch_size = input_points.shape[1]\n box_batch_size = input_boxes.shape[1]\n if point_batch_size != box_batch_size:\n raise ValueError(\n \"You should provide as many bounding boxes as input points per box. Got {} and {}.\".format(\n point_batch_size, box_batch_size\n )\n )\n\n image_positional_embeddings = self.get_image_wide_positional_embeddings()\n # repeat with batch size\n batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings.shape[0]\n image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)\n\n vision_attentions = None\n vision_hidden_states = None\n\n if pixel_values is not None:\n vision_outputs = self.vision_encoder(\n pixel_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n image_embeddings = vision_outputs[0]\n\n if output_hidden_states:\n vision_hidden_states = vision_outputs[1]\n if output_attentions:\n vision_attentions = vision_outputs[-1]\n\n if input_points is not None and input_labels is None:\n input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)\n\n if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]:\n raise ValueError(\n \"The batch size of the image embeddings and the input points must be the same. \",\n \"Got {} and {} respectively.\".format(image_embeddings.shape[0], input_points.shape[0]),\n \" if you want to pass multiple points for the same image, make sure that you passed \",\n \" input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and \",\n \" input_labels of shape (batch_size, point_batch_size, num_points_per_image)\",\n )\n\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n input_masks=input_masks,\n )\n\n # NOTE(xiaoke): Modified. 
We need to outputs one more tensor: `query_outputs`\n low_res_masks, iou_predictions, query_outputs, mask_decoder_attentions = self.mask_decoder(\n image_embeddings=image_embeddings,\n image_positional_embeddings=image_positional_embeddings,\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n attention_similarity=attention_similarity,\n target_embedding=target_embedding,\n output_attentions=output_attentions,\n )\n\n # low_res_masks: (batch_size, num_masks, num_output_heads, logits_height, logits_width)\n # iou_predictions: (batch_size, num_masks, num_output_heads)\n # query_outputs: (batch_size, num_masks, num_output_heads, num_caption_tokens, hidden_size)\n batch_size, num_masks, num_output_heads, num_caption_tokens, hidden_size = query_outputs.shape\n # NOTE(xiaoke): We use `expand` instead of `repeat` to avoid copying the tensor.\n # So now we need to `reshape` the tensor to the original shape due to the mismatched stride.\n query_outputs = query_outputs.reshape(\n -1, num_caption_tokens, hidden_size\n ) # (batch_size * num_masks * num_output_heads, num_caption_tokens, hidden_size)\n\n language_model_inputs = self.language_project(\n query_outputs\n ) # (batch_size * num_masks * num_output_heads, num_caption_tokens, hidden_size)\n language_model_attention_mask = torch.ones(\n language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device\n ) # (batch_size * num_masks * num_output_heads, 1)\n\n # NOTE(xiaoke): Handle the edge case. If in train mode, and one of the input_ids and attention_mask is None, we should set the labels to None explicitly.\n if mode == \"train\" and (input_ids is None or attention_mask is None):\n logger.info(\n \"In train mode, and one of the input_ids and attention_mask is None. Set them and labels to None.\"\n )\n input_ids = None\n attention_mask = None\n labels = None\n\n if mode == \"train\" and (input_ids is not None and attention_mask is not None):\n # input_ids: (batch_size, num_masks, PADDED_length)\n # attention_mask: (batch_size, num_masks, PADDED_length)\n # NOTE(xiaoke): Copy from ..sam_captioner.modeling_sam_captioner.SamCaptionerModel\n input_ids = input_ids.unsqueeze(-2).repeat_interleave(num_output_heads, dim=-2).flatten(0, 2)\n attention_mask = (\n attention_mask.unsqueeze(-2).repeat_interleave(num_output_heads, dim=-2).flatten(0, 2)\n ) # (batch_size * num_masks * num_output_heads, PADDED_length)\n\n # TODO(xiaoke): Now we repeat the labels num_output_heads times. 
Is this correct?\n # Shall we follow SAM to backpropagate the loss for the head with the lowest IoU?\n if labels is not None:\n labels = labels.unsqueeze(-2).repeat_interleave(num_output_heads, dim=-2).flatten(0, 2)\n\n inputs_embeds = self.language_model.get_input_embeddings()(input_ids)\n inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)\n\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n expected_device = language_model_attention_mask.device\n attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(expected_device)], dim=1)\n else:\n inputs_embeds = language_model_inputs\n attention_mask = language_model_attention_mask\n\n if self.config.use_decoder_only_language_model:\n if mode == \"train\":\n outputs = self.language_model(\n inputs_embeds=inputs_embeds,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n logits = outputs.logits if return_dict else outputs[0]\n loss = None\n # we compute the loss here since we need to take into account the sequence length of the query embeds\n if labels is not None:\n # TODO(xiaoke): Now we repeat the labels num_output_heads times. Is this correct?\n # Shall we follow SAM to backpropagate the loss for the head with the lowest IoU?\n labels = labels.to(logits.device)\n logits = logits[:, -labels.size(1) :, :]\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous().to(logits.device)\n\n # Flatten the tokens\n loss_fct = CrossEntropyLoss(reduction=\"mean\")\n\n loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))\n else:\n for key in list(kwargs.keys()):\n # remove the keys that are not used by captioner.generate.\n # Or it will raise error in `transformers/generation/utils.py:_validate_model_kwargs`\n # they are used for post-processing\n if key in UNUSED_KEYS_IN_GENERATE:\n kwargs.pop(key)\n language_model_generate_ids = self.language_model.generate(\n inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs\n )\n sam_output = SamImageSegmentationOutput(iou_scores=iou_predictions, pred_masks=low_res_masks)\n language_model_generate_ids = language_model_generate_ids.view(\n batch_size, num_masks, num_output_heads, -1\n )\n query_outputs = query_outputs.view(batch_size, num_masks, num_output_heads, 1, -1)\n language_model_inputs = language_model_inputs.view(batch_size, num_masks, num_output_heads, 1, -1)\n return language_model_generate_ids, sam_output, query_outputs, language_model_inputs\n else:\n raise ValueError(\"Only decoder only language model is supported.\")\n\n if not return_dict:\n sam_output = (iou_predictions, low_res_masks)\n if output_hidden_states:\n sam_output = sam_output + (vision_hidden_states,)\n\n if output_attentions:\n sam_output = sam_output + (vision_attentions, mask_decoder_attentions)\n output = (loss, logits) + sam_output + outputs + (query_outputs, language_model_inputs)\n return output\n\n sam_output = SamImageSegmentationOutput(\n iou_scores=iou_predictions,\n pred_masks=low_res_masks,\n vision_hidden_states=vision_hidden_states,\n vision_attentions=vision_attentions,\n mask_decoder_attentions=mask_decoder_attentions,\n )\n return ScaForConditionalGnerationModelOutput(\n loss=loss,\n logits=logits,\n segmentation_outputs=sam_output,\n language_model_outputs=outputs,\n 
query_logits=query_outputs,\n projected_query_logits=language_model_inputs,\n )\n\n @classmethod\n def from_sam_text_pretrained(\n cls,\n sam_pretrained_model_name_or_path: str = None,\n text_pretrained_model_name_or_path: str = None,\n additional_num_hidden_layers: int = 2,\n num_caption_tokens: int = 1,\n **kwargs,\n ):\n sam_config = transformers.AutoConfig.from_pretrained(sam_pretrained_model_name_or_path, **kwargs)\n sam_architectures = sam_config.architectures\n if len(sam_architectures) != 1:\n logger.warning(f\"sam_architectures: {sam_architectures} has to be of length 1\")\n text_config = transformers.AutoConfig.from_pretrained(text_pretrained_model_name_or_path, **kwargs)\n config = ScaConfig.from_sam_text_configs(\n sam_config=sam_config,\n text_config=text_config,\n additional_num_hidden_layers=additional_num_hidden_layers,\n num_caption_tokens=num_caption_tokens,\n **kwargs,\n )\n language_model = AutoModelForCausalLM.from_pretrained(text_pretrained_model_name_or_path, **kwargs)\n sca_model = cls.from_pretrained(\n sam_pretrained_model_name_or_path, config=config, language_model=language_model, **kwargs\n )\n # NOTE(xiaoke): Validate the unloaded weights in the model by calling\n # `set([\".\".join(i.split(\".\")[0:2]) for i in unloaded_weights])`\n # There should be no weights left in the pretrained weights that are unloaded.\n return sca_model\n\n @torch.no_grad()\n def generate(self, *args, **kwargs):\n language_model_generate_ids, sam_output, query_outputs, language_model_inputs = self.forward(\n \"inference\", *args, **kwargs\n )\n return ScaForConditionalGnerationModelOutput(\n sequences=language_model_generate_ids,\n segmentation_outputs=sam_output,\n query_logits=query_outputs,\n projected_query_logits=language_model_inputs,\n iou_scores=sam_output.iou_scores,\n pred_masks=sam_output.pred_masks,\n )\n\n def config_parameters(self):\n # NOTE(xiaoke): By default we freeze all the parameters in the config.\n # HF transformers trainer use requires_grad=True to filter out the parameters that need to be optimized.\n for param in self.parameters():\n param.requires_grad = False\n\n # Turn on the parameters that need to be optimized.\n TO_BE_OPTIMIZED = [\n self.mask_decoder.additional_transformer,\n self.mask_decoder.caption_tokens,\n self.language_project,\n ]\n for module in TO_BE_OPTIMIZED:\n for param in module.parameters():\n param.requires_grad = True\n\n # NOTE: To support gradient checkpoint for LM: https://github.com/huggingface/transformers/pull/19990/files\n def _set_gradient_checkpointing(self, module, value=False):\n # NOTE: Most language models in HF supprots gradient checkpointing\n # e.g., OpenLLAMA: https://github.com/huggingface/transformers/blob/5a4f340df74b42b594aedf60199eea95cdb9bed0/src/transformers/models/deprecated/open_llama/modeling_open_llama.py#L464C9-L464C36\n # gpt2: https://github.com/huggingface/transformers/blob/5a4f340df74b42b594aedf60199eea95cdb9bed0/src/transformers/models/gpt2/modeling_gpt2.py#L483C9-L483C36\n self.language_model._set_gradient_checkpointing(module, value=value)\n\n # NOTE: SAM vision encoder supports gradient checkponit\n # https://github.com/huggingface/transformers/blob/5a4f340df74b42b594aedf60199eea95cdb9bed0/src/transformers/models/sam/modeling_sam.py#L1012C14-L1012C37\n self.vision_encoder.gradient_checkpointing = value"
}
] | import sys
import pytest
import requests
import torch
import time
import numpy as np
import torch
import transformers
from PIL import Image
from src.models.sca import ScaConfig, ScaModel, ScaProcessor
from typing import Sequence
from torch.nn.utils.rnn import pad_sequence | 7,969 |
sys.path.append(".")
cache_dir = ".model.cache"
device = "cuda" if torch.cuda.is_available() else "cpu"
sam_model_name = "facebook/sam-vit-base"
text_model_name = "gpt2"
additional_num_hidden_layers = 2
@pytest.fixture
def model():
model = ScaModel.from_sam_text_pretrained(
sam_model_name, text_model_name, additional_num_hidden_layers, cache_dir=cache_dir
).to(device)
return model
@pytest.fixture
def processor():
|
sys.path.append(".")
cache_dir = ".model.cache"
device = "cuda" if torch.cuda.is_available() else "cpu"
sam_model_name = "facebook/sam-vit-base"
text_model_name = "gpt2"
additional_num_hidden_layers = 2
@pytest.fixture
def model():
model = ScaModel.from_sam_text_pretrained(
sam_model_name, text_model_name, additional_num_hidden_layers, cache_dir=cache_dir
).to(device)
return model
@pytest.fixture
def processor(): | processor = ScaProcessor.from_sam_text_pretrained(sam_model_name, text_model_name, cache_dir=cache_dir) | 1 | 2023-11-17 14:10:41+00:00 | 12k |
artwalker/EasyTranslator | easy_translator.py | [
{
"identifier": "CommandArgs",
"path": "command_args.py",
"snippet": "class CommandArgs:\r\n \"\"\"A class to read the arguments from command line .\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Initialize command arguments.\"\"\"\r\n # Use the argparse module in the Python standard library to parse command-line arguments.\r\n parser = argparse.ArgumentParser()\r\n # Receive the parameter of the file name to be translated in the command line.\r\n parser.add_argument(\"filename\", help=\"Name of the input file\")\r\n # Select if show the text and the translated text in the console.\r\n parser.add_argument(\"--show\", help=\"Show the text and the translated text in the console\", action=\"store_true\")\r\n # Select if use the api from Azure.\r\n parser.add_argument(\"--azure\", help=\"Use the api from Azure.\", action=\"store_true\")\r\n # The test mode: only translate the first 3 short texts\r\n parser.add_argument(\"--test\", help=\"Only translate the first 3 short texts\", action=\"store_true\")\r\n # If use the translated name table\r\n parser.add_argument(\"--tlist\", help=\"Use the translated name table\", action=\"store_true\")\r\n \r\n self.args = parser.parse_args()\r"
},
{
"identifier": "ParameterReader",
"path": "parameter_reader.py",
"snippet": "class ParameterReader:\n \"\"\"A class to read the parameters from the settings.cfg file and the .env file.\"\"\"\n\n def __init__(self, commandArgs):\n \"\"\"Read the parameters from the settings.cfg file and the .env file.\"\"\"\n # The command line arguments\n self.commandArgs = commandArgs\n self.filename = \"\"\n self.show = \"\"\n self.azure = \"\"\n self.test = \"\"\n self.tlist = \"\"\n self.base_filename = \"\"\n self.file_extension = \"\"\n self.new_filename = \"\"\n self.new_filenametxt = \"\"\n self.jsonfile = \"\"\n self.translated_dict = {}\n self.api_proxy_url = \"\"\n self.gpt_model = \"\"\n self.openai_api_engine_azure = \"\"\n self.openai_api_model_azure = \"\"\n self.client = \"\"\n self.non_azure_client = \"\"\n self.gpt_temperature = \"\"\n \n # The arguments from the settings.cfg file\n self.language = \"\"\n self.prompt_template = \"\"\n self.prompt = \"\"\n self.bilingual_output = \"\"\n self.language_code = \"\"\n self.api_proxy = \"\"\n self.startpage = \"\"\n self.endpage = \"\"\n self.transliteration_list_file = \"\"\n self.transliteration_word_capi_low = \"\"\n\n # 1. Set the parameters from the command line.\n self._set_args_from_command()\n # 3. Set the parameters from the settings.cfg file and the .env file.\n self._set_args_from_parameter_reader()\n # 2. Set the OpenAI API key.\n self._access_openai_key()\n # 4. Load the translated dictionary from the json file.\n self._load_tranlated_dict()\n\n def _access_openai_key(self):\n \"\"\"set the OpenAI API key.\"\"\"\n _ = load_dotenv(find_dotenv(), override=True)\n self.gpt_temperature = float(os.getenv('GPT_TEMPERATURE'))\n if self.azure:\n # imort the azure.identity package\n from openai import AzureOpenAI\n\n # Set the Azure OpenAI parameters\n self.client = AzureOpenAI(\n api_version=os.getenv('OPENAI_API_VERSION_AZURE'),\n azure_endpoint=os.getenv('OPENAI_API_ENDPOINT_AZURE'),\n api_key=os.getenv('OPENAI_API_KEY_AZURE'),\n )\n\n self.openai_api_model_azure = os.getenv('OPENAI_API_MODEL_AZURE')\n else:\n # Get the OpenAI API keys from the .env file\n key_sets = os.getenv('OPENAI_API_KEY')\n # If there are multiple keys, split them into an array\n key_array = key_sets.split(',')\n\n if len(self.api_proxy) == 0:\n # Set the OpenAI API key\n openai.api_key = random.choice(key_array)\n else:\n # Create an OpenAI client with proxy\n api_key = random.choice(key_array)\n self.api_proxy_url = self.api_proxy\n base_url = os.environ.get(\"OPENAI_API_URL\", self.api_proxy_url)\n self.non_azure_client = openai.OpenAI(api_key=api_key, base_url=base_url)\n print(\"-\" * 3)\n print(f\"\\033[1;32mUsing OpenAI API proxy, the proxy address is: {base_url}\\033[0m\")\n\n self.gpt_model = os.getenv('GPT_MODEL')\n\n def _set_args_from_parameter_reader(self):\n \"\"\"Get the settings from the settings.cfg file.\"\"\"\n with open('settings.cfg', 'rb') as f:\n content = f.read()\n self.encoding = chardet.detect(content)['encoding']\n\n with open('settings.cfg', encoding=self.encoding) as f:\n config_text = f.read()\n self.config = configparser.ConfigParser()\n self.config.read_string(config_text)\n \n # Get the settings from the settings.cfg file\n self.language = self.config.get('config', 'language')\n self.prompt_template = self.config.get('config', 'prompt')\n self.prompt = self.prompt_template.format(self.language)\n self.bilingual_output = self.config.get('config', 'bilingual-output')\n self.language_code = self.config.get('config', 'langcode')\n self.api_proxy=self.config.get('config', 'openai-proxy')\n # Get the 
start and end page of the PDF file\n self.startpage = self.config.getint('config', 'startpage', fallback=1)\n self.endpage = self.config.getint('config', 'endpage', fallback=-1)\n # Get the transliteration list file\n self.transliteration_list_file = self.config.get('config', 'transliteration-list')\n # Get the setting of case to determine whether to do transliteration\n self.transliteration_word_capi_low = self.config.get('config', 'transliteration-word-capi-low')\n\n def _set_args_from_command(self):\n \"\"\"Set arguments from the command line.\"\"\"\n self.filename = self.commandArgs.args.filename\n self.show = self.commandArgs.args.show\n self.test = self.commandArgs.args.test\n self.tlist = self.commandArgs.args.tlist\n self.azure = self.commandArgs.args.azure\n\n self.base_filename, self.file_extension = os.path.splitext(self.filename)\n self.new_filename = self.base_filename + \"_translated.epub\"\n self.new_filenametxt = self.base_filename + \"_translated.txt\"\n self.jsonfile = self.base_filename + \"_process.json\"\n\n def _load_tranlated_dict(self):\n \"\"\"\n Load the translated dictionary from the json file.\n Such as the translation stoped in the middle, \n and the translated dictionary is saved in the json file.\n So we can continue the translation from the last stop.\n \"\"\"\n try:\n if os.path.getsize(self.jsonfile) > 0:\n with open(self.jsonfile, \"r\", encoding=\"utf-8\") as f:\n self.translated_dict = json.load(f)\n except Exception as e:\n #print(e)\n pass"
},
{
"identifier": "ProcessFile",
"path": "process_file.py",
"snippet": "class ProcessFile:\r\n \"\"\"A class about according to the file extension, use the corresponding function to convert the file to text.\"\"\"\r\n\r\n def __init__(self, parameterReader):\r\n \"\"\"Initialize the title of filename and text which receives the contents of file.\"\"\"\r\n self.filename = \"\"\r\n self.start_page = 0\r\n self.end_page = 0\r\n self.total_pages = 0\r\n self.transliteration_list_file = \"\"\r\n self.transliteration_word_capi_low = \"\"\r\n self.bilingual_output = \"\"\r\n self.prompt = \"\"\r\n self.language_code = \"\"\r\n self.jsonfile = \"\"\r\n self.translated_dict = \"\"\r\n self.new_filename = \"\"\r\n self.new_filenametxt = \"\"\r\n self.show = \"\"\r\n self.azure = \"\"\r\n self.tlist = \"\"\r\n self.test = \"\"\r\n self.gpt_model = \"\"\r\n self.gpt_temperature = \"\"\r\n\r\n self.title = \"\"\r\n self.text = \"\"\r\n self.book = \"\"\r\n self.total_tokens = 0\r\n self.completion_tokens = 0\r\n self.prompt_tokens = 0\r\n self.short_text_list = \"\"\r\n self.translated_text = \"\"\r\n self.translated_short_text = \"\"\r\n self.count = 0\r\n self.messages = \"\"\r\n\r\n self.client = \"\"\r\n self.non_azure_client = \"\"\r\n\r\n self._set_args_from_parameterReader(parameterReader)\r\n\r\n def _set_args_from_parameterReader(self, parameterReader):\r\n \"\"\"Set args from parameterReader.\"\"\"\r\n self.filename = parameterReader.filename\r\n self.start_page = parameterReader.startpage\r\n self.end_page = parameterReader.endpage\r\n self.total_pages = 0\r\n self.transliteration_list_file = parameterReader.transliteration_list_file\r\n self.transliteration_word_capi_low = parameterReader.transliteration_word_capi_low\r\n self.bilingual_output = parameterReader.bilingual_output\r\n self.prompt = parameterReader.prompt\r\n self.language_code = parameterReader.language_code\r\n self.jsonfile = parameterReader.jsonfile\r\n self.translated_dict = parameterReader.translated_dict\r\n self.new_filename = parameterReader.new_filename\r\n self.new_filenametxt = parameterReader.new_filenametxt\r\n self.show = parameterReader.show\r\n self.tlist = parameterReader.tlist\r\n self.test = parameterReader.test\r\n self.gpt_model = parameterReader.gpt_model\r\n self.gpt_temperature = parameterReader.gpt_temperature\r\n self.api_proxy = parameterReader.api_proxy\r\n\r\n self.azure = parameterReader.azure\r\n if self.azure:\r\n self.client = parameterReader.client\r\n self.openai_api_model_azure = parameterReader.openai_api_model_azure\r\n \r\n if len(self.api_proxy) != 0:\r\n self.non_azure_client = parameterReader.non_azure_client\r\n\r\n\r\n def _get_pdf_total_pages(self):\r\n \"\"\"Get total pages.\"\"\"\r\n with open(self.filename, 'rb') as file:\r\n parser = PDFParser(file)\r\n document = PDFDocument(parser)\r\n self.total_pages = len(list(PDFPage.create_pages(document)))\r\n\r\n def _convert_pdf_to_text(self):\r\n \"\"\"Access the contents of the PDF file and convert it to text.\"\"\"\r\n print(\"\\033[1;32mINFO:Converting pdf to text.\\033[0m\")\r\n self.text = pdfminer.high_level.extract_text(self.filename, page_numbers=list(range(self.start_page - 1, self.end_page)))\r\n print(\"-\" * 3)\r\n if self.show:\r\n print(\"*\" * 3)\r\n print(self.text)\r\n print(\"*\" * 3)\r\n print(\"\\033[1;32mINFO:Finished converting pdf to text\\033[0m\")\r\n\r\n def _convert_mobi_to_text(self):\r\n \"\"\"Access the content fo mobi and then convert it to text.\"\"\"\r\n # Extract MOBI contents to a temporary directory\r\n with tempfile.TemporaryDirectory() as tempdir:\r\n 
tempdir, filepath = mobi.extract(self.filename)\r\n\r\n # Find the HTML file in the temporary directory\r\n for root, _, files in os.walk(tempdir):\r\n for file in files:\r\n if file.endswith(\".html\"):\r\n html_file = os.path.join(root, file)\r\n break\r\n else:\r\n continue\r\n break\r\n else:\r\n raise FileNotFoundError(\"ERROR:HTML file not found in the extracted MOBI contents\")\r\n\r\n # Parse the HTML file with BeautifulSoup to get the text\r\n with open(html_file, \"r\", encoding=\"utf-8\") as f:\r\n soup = BeautifulSoup(f.read(), \"html.parser\")\r\n self.text = soup.get_text()\r\n\r\n def _convert_docx_to_text(self):\r\n \"\"\"Access the content of docx and then convert it to text.\"\"\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the DOCX content.\\033[0m\")\r\n doc = docx.Document(self.filename)\r\n\r\n for paragraph in doc.paragraphs:\r\n self.text += paragraph.text + \"\\n\"\r\n\r\n def _convert_epub_to_text(self):\r\n \"\"\"Convert epub to text.\"\"\"\r\n # Access all contents\r\n for item in self.book.get_items():\r\n if item.get_type() == ebooklib.ITEM_DOCUMENT:\r\n # Use BeautifulSoup to extract the original text\r\n soup = BeautifulSoup(item.get_content(), 'html.parser')\r\n self.text += re.sub(r'\\n+', '\\n', soup.get_text().strip())\r\n\r\n def _text_replace(self):\r\n \"\"\"Replace the text according to the transliteration table.\"\"\"\r\n # Read the excel file and store the first column and the second column as two lists\r\n df = pd.read_excel(self.transliteration_list_file)\r\n old_words = df.iloc[:, 0].tolist()\r\n new_words = df.iloc[:, 1].tolist()\r\n # Order the old word list in descending order of length and synchronize the new word list\r\n old_words, new_words = zip(*sorted(zip(old_words, new_words), key=lambda x: len(x[0]), reverse=True))\r\n # Iterate through two lists and replace strings\r\n for i in range(len(old_words)):\r\n # If ingore the case, convert the string and the word to be replaced to lowercase\r\n if not self.transliteration_word_capi_low:\r\n lower_old_word = old_words[i].lower()\r\n # Use the regular expression to replace, note that the original string case is retained\r\n self.text = re.sub(r\"\\b\" + lower_old_word + r\"\\b\", new_words[i], self.text, flags=re.IGNORECASE)\r\n else:\r\n # If care about the case, just use the regular expression to replace\r\n self.text = re.sub(r\"\\b\" + old_words[i] + r\"\\b\", new_words[i], self.text)\r\n\r\n def _text_replace_reverse(self, text):\r\n \"\"\"Replace the text according to the transliteration table in reverse order.\"\"\"\r\n # Read the excel file and store the first column and the second column as two lists\r\n df = pd.read_excel(self.transliteration_list_file)\r\n old_words = df.iloc[:, 0].tolist() # Swapped\r\n new_words = df.iloc[:, 1].tolist() # Swapped\r\n # Order the new word list in descending order of length and synchronize the old word list\r\n new_words, old_words = zip(*sorted(zip(new_words, old_words), key=lambda x: len(x[0]), reverse=True))\r\n # Iterate through two lists and replace strings\r\n for i in range(len(new_words)):\r\n # If ignore the case, convert the string and the word to be replaced to lowercase\r\n if not self.transliteration_word_capi_low:\r\n lower_new_word = new_words[i].lower()\r\n # Use the regular expression to replace, note that the original string case is retained\r\n text = re.sub(r\"\\b\" + lower_new_word + r\"\\b\", old_words[i], text, flags=re.IGNORECASE)\r\n else:\r\n # If care about the case, just use the regular expression to 
replace\r\n text = re.sub(r\"\\b\" + new_words[i] + r\"\\b\", old_words[i], text)\r\n\r\n return text\r\n\r\n def _reverse_text_replace_reverse(self, text):\r\n \"\"\"Reverse the text according to the transliteration table in reverse order.\"\"\"\r\n # Read the excel file and store the first column and the second column as two lists\r\n df = pd.read_excel(self.transliteration_list_file)\r\n new_words = df.iloc[:, 0].tolist() # Swapped\r\n old_words = df.iloc[:, 1].tolist() # Swapped\r\n # Order the new word list in descending order of length and synchronize the old word list\r\n new_words, old_words = zip(*sorted(zip(new_words, old_words), key=lambda x: len(x[0]), reverse=True))\r\n # Iterate through two lists and replace strings\r\n for i in range(len(new_words)):\r\n # If ignore the case, convert the string and the word to be replaced to lowercase\r\n if not self.transliteration_word_capi_low:\r\n lower_new_word = new_words[i].lower()\r\n # Use the regular expression to replace, note that the original string case is retained\r\n text = re.sub(r\"\\b\" + lower_new_word + r\"\\b\", old_words[i], text, flags=re.IGNORECASE)\r\n else:\r\n # If care about the case, just use the regular expression to replace\r\n text = re.sub(r\"\\b\" + new_words[i] + r\"\\b\", old_words[i], text)\r\n\r\n return text\r\n\r\n def _split_text(self):\r\n \"\"\"Divide the text into a list of short texts with no more than 1024 characters.\"\"\"\r\n # Use the regular expression to split the text into a list of sentences\r\n sentence_list = re.findall(r'.+?[。!?!?.]', self.text)\r\n # Initialize the short text list\r\n self.short_text_list = []\r\n # Initialize the current short text\r\n short_text = \"\"\r\n # Iterate through the sentence list\r\n for s in sentence_list:\r\n # If the current short plus the length of the new sentence is not greater than 1024, add the new sentence to the current short\r\n if len(short_text + s) <= 1024:\r\n short_text += s\r\n # If the current short plus the length of the new sentence is greater than 1024, add the current short to the short text list and reset the current short to the new sentence\r\n else:\r\n self.short_text_list.append(short_text)\r\n short_text = s\r\n # Add the last short text to the short text list\r\n self.short_text_list.append(short_text)\r\n\r\n def _replace_sign(self, text):\r\n \"\"\"Replace the period with a period plus line break.\"\"\"\r\n text = text.replace(\". 
\", \".\\n\")\r\n text = text.replace(\"。\", \"。\\n\")\r\n text = text.replace(\"?\", \"?\\n\")\r\n text = text.replace(\"?\", \"?\\n\")\r\n text = text.replace(\"!\", \"!\\n\")\r\n text = text.replace(\"。\\n”\", \"。”\\n\")\r\n text = text.replace(\"!\\n”\", \"!”\\n\")\r\n text = text.replace(\"?\\n”\", \"?”\\n\")\r\n return text\r\n\r\n def _get_completion_from_messages(self):\r\n \"\"\"Get completion from messages.\"\"\"\r\n if len(self.api_proxy) == 0:\r\n response = openai.chat.completions.create(\r\n model=self.gpt_model,\r\n messages=self.messages,\r\n temperature=self.gpt_temperature,\r\n )\r\n else:\r\n response = self.non_azure_client.chat.completions.create(\r\n model=self.gpt_model,\r\n messages=self.messages,\r\n temperature=self.gpt_temperature,\r\n )\r\n\r\n content = response.choices[0].message.content\r\n\r\n token_dict = {\r\n 'prompt_tokens':response.usage.prompt_tokens,\r\n 'completion_tokens':response.usage.completion_tokens,\r\n 'total_tokens':response.usage.total_tokens,\r\n }\r\n\r\n return content, token_dict\r\n\r\n def _get_completion_from_messages_by_azure(self):\r\n \"\"\"Get completion from messages by azure.\"\"\"\r\n response = self.client.chat.completions.create(\r\n model=self.openai_api_model_azure,\r\n messages=self.messages,\r\n temperature=self.gpt_temperature, \r\n )\r\n\r\n #print(str(response.choices[0].message))\r\n content = response.choices[0].message.content\r\n\r\n token_dict = {\r\n 'prompt_tokens':response.usage.prompt_tokens,\r\n 'completion_tokens':response.usage.completion_tokens,\r\n 'total_tokens':response.usage.total_tokens,\r\n }\r\n return content, token_dict\r\n\r\n def _comletion_tokens(self):\r\n \"\"\"Get comletion and tokens.\"\"\"\r\n if self.azure:\r\n completion, token_dict = self._get_completion_from_messages_by_azure()\r\n else:\r\n completion, token_dict = self._get_completion_from_messages()\r\n self.translated_short_text = (\r\n completion\r\n .encode(\"utf8\")\r\n .decode()\r\n )\r\n # Get the token usage from the API response\r\n self.total_tokens += token_dict['total_tokens']\r\n self.completion_tokens += token_dict['completion_tokens']\r\n self.prompt_tokens += token_dict['prompt_tokens']\r\n\r\n def _translate_text(self, content):\r\n \"\"\"Translate the text.\"\"\"\r\n # Call the OpenAI API for translation\r\n try:\r\n self.messages = [\r\n {'role':'system', \r\n 'content': f\"You are a translation assistant.Your task is to translate the content given to you by the user.{self.prompt}\"},\r\n {'role': 'user',\r\n 'content': f\"{content}\\n\"},\r\n ]\r\n self._comletion_tokens()\r\n except Exception as e:\r\n # Time to wait for limitation of ChatGPT\r\n sleep_time = 60 * 3 + 5\r\n print(e, \"\\n\"+f\"Sleep {sleep_time} seconds.\")\r\n time.sleep(sleep_time)\r\n self._comletion_tokens()\r\n\r\n def _translate_and_store(self, text):\r\n \"\"\"Tranlate and store text.\"\"\"\r\n if self.tlist:\r\n # Revert the replacement so that it can be judged whether the text has been translated\r\n text = self._text_replace_reverse(text)\r\n # If the text has been translated, return the translation result directly\r\n if text in self.translated_dict:\r\n self.translated_short_text = self.translated_dict[text]\r\n else:\r\n # Before translation, replace the text according to the transliteration table\r\n text = self._reverse_text_replace_reverse(text)\r\n # Else, call the translate_text function to translate and store the result in the dictionary\r\n self._translate_text(text)\r\n # Reverse the replacement of the transliteration table 
so than the text keeps the original content\r\n text = self._text_replace_reverse(text)\r\n self.translated_dict[text] = self.translated_short_text\r\n # Save the dictionary as a JSON file\r\n with open(self.jsonfile, \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.translated_dict, f, ensure_ascii=False, indent=4)\r\n else:\r\n # If the text has been translated, return the translation result directly\r\n if text in self.translated_dict:\r\n self.translated_short_text = self.translated_dict[text]\r\n else:\r\n # Else, call the translate_text function to translate and store the result in the dictionary\r\n self._translate_text(text)\r\n self.translated_dict[text] = self.translated_short_text\r\n # Save the dictionary as a JSON file\r\n with open(self.jsonfile, \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.translated_dict, f, ensure_ascii=False, indent=4)\r\n\r\n def _process_text(self):\r\n \"\"\"Process the text.\"\"\"\r\n # Replace all line breaks with spaces\r\n self.text = self.text.replace(\"\\n\", \" \")\r\n # Replace multiple spaces with one space\r\n self.text = re.sub(r\"\\s+\", \" \", self.text)\r\n # If the transliteration table replacement is set, replace the text before translation\r\n if self.tlist:\r\n self._text_replace()\r\n # Split the text into short texts of no more than 1024 characters\r\n self._split_text()\r\n # If the test mode is turned on, only translate the first 3 short texts\r\n if self.test:\r\n self.short_text_list = self.short_text_list[:3]\r\n # Iterate through the short text list and translate each short text in turn\r\n for short_text in self.short_text_list:\r\n self.count += 1\r\n # Translate the current short text\r\n time.sleep(0.5)\r\n self._translate_and_store(short_text)\r\n short_text = self._replace_sign(short_text)\r\n self.translated_short_text = self._replace_sign(self.translated_short_text)\r\n short_text = self._text_replace_reverse(short_text)\r\n # Add the current short text and the translated text to the total text\r\n if self.bilingual_output.lower() == 'true':\r\n self.translated_text += f\"{short_text}<br>\\n{self.translated_short_text}<br>\\n\"\r\n else:\r\n self.translated_text += f\"{self.translated_short_text}<br>\\n\"\r\n if self.show:\r\n print(\"*\" * 3)\r\n print(short_text)\r\n print(\"*\" * 1)\r\n print(self.translated_short_text)\r\n print(\"*\" * 3)\r\n\r\n def _text_to_epub(self):\r\n \"\"\"Write the translated text to the epub file.\"\"\"\r\n text = self.translated_text.replace('\\n', '<br>').replace(\"\\n\", \"<br>\")\r\n # Create an epub book object\r\n book = epub.EpubBook()\r\n # Set the metadata\r\n book.set_identifier(str(random.randint(100000, 999999)))\r\n book.set_title(self.title)\r\n book.set_language(self.language_code)\r\n # Create a chapter object\r\n c = epub.EpubHtml(title='Chapter 1', file_name='chap_1.xhtml', lang=self.language_code)\r\n c.content = text\r\n # Add the chapter to the book\r\n book.add_item(c)\r\n # Add the table of contents\r\n book.toc = (epub.Link('chap_1.xhtml', 'Chapter 1', 'chap_1'),)\r\n # Set spine order\r\n book.spine = ['nav', c]\r\n # Add navigation files\r\n book.add_item(epub.EpubNcx())\r\n book.add_item(epub.EpubNav())\r\n # Write the content to the epub book\r\n #print(\"\\n\" + text)\r\n try:\r\n epub.write_epub(self.new_filename, book, {})\r\n except Exception as e:\r\n print(f\"Failed to write EPUB: {e}\")\r\n\r\n def _get_title_of_md(self):\r\n \"\"\"Get title of the md.\"\"\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the md title.\\033[0m\")\r\n 
with open(self.filename, 'r', encoding='utf-8') as file:\r\n for line in file:\r\n if line.startswith('#'):\r\n self.title = line.replace('#', '').strip()\r\n break\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing the md title.\\033[0m\")\r\n\r\n def _get_title_of_txt(self):\r\n \"\"\"Get title of the txt.\"\"\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the txt title.\\033[0m\")\r\n title_extension = os.path.basename(self.filename)\r\n self.title = os.path.splitext(title_extension)[0]\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing the txt title.\\033[0m\")\r\n\r\n def _get_title_of_docx(self):\r\n \"\"\"Get title of the docx.\"\"\"\r\n try:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the docx file.\\033[0m\")\r\n with zipfile.ZipFile(self.filename) as zf:\r\n core_properties = etree.fromstring(zf.read(\"docProps/core.xml\"))\r\n\r\n ns = {\"cp\": \"http://schemas.openxmlformats.org/package/2006/metadata/core-properties\",\r\n \"dc\": \"http://purl.org/dc/elements/1.1/\",\r\n \"dcterms\": \"http://purl.org/dc/terms/\",\r\n \"dcmitype\": \"http://purl.org/dc/dcmitype/\",\r\n \"xsi\": \"http://www.w3.org/2001/XMLSchema-instance\"}\r\n \r\n title_elements = core_properties.findall(\"dc:title\", ns)\r\n if title_elements:\r\n self.title = title_elements[0].text\r\n else:\r\n self.title = \"INFO:Unknown title.\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing the docx title.\\033[0m\")\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(\"\\033[91mERROR:Parsing the DOCX file.\\033[0m\")\r\n print(\"*\" * 6)\r\n\r\n def _get_title_of_pdf(self):\r\n \"\"\"Get title of the pdf.\"\"\"\r\n try:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the pdf title.\\033[0m\") \r\n with open(self.filename, 'rb') as file:\r\n parser = PDFParser(file)\r\n document = PDFDocument(parser)\r\n if 'Title' in document.info:\r\n self.title = document.info['Title']\r\n else:\r\n text = pdfminer.high_level.extract_text(file)\r\n match = re.search(r'(?<=\\n)([^\\n]+)(?=\\n)', text)\r\n if match:\r\n self.title = match.group(1)\r\n else:\r\n self.title = \"INFO:Unknown title.\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing the pdf title.\\033[0m\") \r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(\"\\033[91mERROR:Parsing the pdf title.\\033[0m\")\r\n print(\"*\" * 6)\r\n\r\n # step 1\r\n def get_title(self):\r\n \"\"\"Get the title of file.\"\"\"\r\n if self.filename.endswith('.pdf'):\r\n self._get_title_of_pdf()\r\n self._get_pdf_total_pages()\r\n elif self.filename.endswith('.txt'):\r\n self._get_title_of_txt()\r\n elif self.filename.endswith('.docx'):\r\n self._get_title_of_docx()\r\n elif self.filename.endswith('.mobi'):\r\n pass\r\n elif self.filename.endswith('.epub'):\r\n self.book = epub.read_epub(self.filename)\r\n elif self.filename.endswith('.md'):\r\n self._get_title_of_md()\r\n else:\r\n print(\"-\" * 3)\r\n print(\"\\033[91mINFO:Unsupported file type right now.\\033[0m\")\r\n print(\"-\" * 3)\r\n sys.exit(0)\r\n\r\n def _get_md_content(self):\r\n \"\"\"Get md content.\"\"\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the md content.\\033[0m\")\r\n with open(self.filename, 'r', encoding='utf-8') as file:\r\n self.text = file.read()\r\n\r\n def _get_txt_content(self):\r\n \"\"\"Get txt content.\"\"\"\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the txt content.\\033[0m\")\r\n with 
open(self.filename, 'r', encoding='utf-8') as file:\r\n self.text = file.read()\r\n\r\n def _get_pdf_content(self):\r\n \"\"\"Get pdf content.\"\"\"\r\n try:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the pdf content.\\033[0m\")\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Total pages of the pdf: {self.total_pages}\\033[0m\") \r\n if self.end_page == -1:\r\n self.end_page = self.total_pages\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Converting pdf from: Page {self.start_page} to Page {self.end_page}.\\033[0m\") \r\n print(\"-\" * 3)\r\n self._convert_pdf_to_text()\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(\"\\033[91mERROR:Parsing the pdf content.\\033[0m\")\r\n print(\"*\" * 6)\r\n\r\n def _get_mobi_content(self):\r\n \"\"\"Get mobi content.\"\"\"\r\n try:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the mobi content.\\033[0m\")\r\n self._convert_mobi_to_text()\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(\"\\033[91mERROR:Parsing the MOBI content.\\033[0m\")\r\n print(\"*\" * 6)\r\n\r\n def _get_epub_content(self):\r\n \"\"\"Get mobi content.\"\"\"\r\n try:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Parsing the EPUB content.\\033[0m\")\r\n self._convert_epub_to_text()\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(\"\\033[91mERROR:Parsing the EPUB content.\\033[0m\")\r\n print(\"*\" * 6)\r\n\r\n # step 2\r\n def convert_text(self):\r\n \"\"\"Convert the file ending with differnt types to text.\"\"\"\r\n if self.filename.endswith('.pdf'):\r\n self._get_pdf_content()\r\n elif self.filename.endswith('.txt'):\r\n self._get_txt_content()\r\n elif self.filename.endswith('.mobi'):\r\n self._get_mobi_content()\r\n elif self.filename.endswith('.docx'):\r\n self._convert_docx_to_text()\r\n elif self.filename.endswith('.epub'):\r\n self._get_epub_content()\r\n elif self.filename.endswith('.md'):\r\n self._get_md_content()\r\n else:\r\n print(\"\\033[91mINFO:Unsupported to access the content of this file type right now.\\033[0m\")\r\n\r\n # step 3\r\n def tranlate_file(self):\r\n \"\"\"Translate the file.\"\"\"\r\n if self.filename.endswith('.epub'):\r\n # Access all chapters of the epub file\r\n items = self.book.get_items()\r\n # Iterate through all chapters\r\n translated_all = ''\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Translating the file content.\\033[0m\")\r\n for item in items:\r\n # If the chapter type is a document type, it needs to be translated\r\n if item.get_type() == ebooklib.ITEM_DOCUMENT:\r\n # Use BeautifulSoup to extract the original text\r\n soup = BeautifulSoup(item.get_content(), 'html.parser')\r\n self.text = soup.get_text().strip()\r\n img_html = ''\r\n img_tags = soup.find_all('img')\r\n for img_tag in img_tags:\r\n img_html += str(img_tag) + '<br>'\r\n # If the text is empty, skip this chapter\r\n if not self.text:\r\n continue\r\n self._process_text()\r\n # Replace the original chapter content with the translated text\r\n item.set_content((img_html + self.translated_text.replace('\\n', '<br>')).encode('utf-8'))\r\n translated_all += self.translated_text\r\n # If the test mode is turned on, only translate the first 3 chapters\r\n if self.test and self.count >= 3:\r\n break\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing and translating the file.\\033[0m\")\r\n # Write content to the epub file\r\n epub.write_epub(self.new_filename, self.book, {})\r\n # 
Write the translated text to the txt file\r\n with open(self.new_filenametxt, \"w\", encoding=\"utf-8\") as f:\r\n f.write(translated_all.replace('<br>', ''))\r\n else:\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Translating the file content.\\033[0m\")\r\n self._process_text()\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Finished parsing and translating the file.\\033[0m\")\r\n print(\"-\" * 3)\r\n # Write the translated text to the epub file\r\n print(\"\\033[1;32mINFO:Writing the translated text to epub.\\033[0m\") # 输出绿色的 \"DEBUG\"\r\n self._text_to_epub()\r\n # Write the translated text to the txt file\r\n print(\"-\" * 3)\r\n print(\"\\033[1;32mINFO:Writing the translated text to the txt file.\\033[0m\")\r\n with open(self.new_filenametxt, \"w\", encoding=\"utf-8\") as f:\r\n f.write(self.translated_text.replace('<br>', ''))\r\n\r\n # step 4\r\n def caculate_tokens_costs(self):\r\n \"\"\"Caculate the tokens.\"\"\"\r\n cost = self.completion_tokens / 1000 * 0.002 + self.prompt_tokens / 1000 * 0.001\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Use completion tokens: {self.completion_tokens}.\\033[0m\")\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Use prompt tokens: {self.prompt_tokens}.\\033[0m\")\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Use total tokens: {self.total_tokens}.\\033[0m\")\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;32mINFO:Total approximate cost: ${cost}.\\033[0m\")\r\n print(\"-\" * 3)\r\n print(f\"\\033[1;34mINFO:Translation completed.\\033[0m\")\r\n print(\"-\" * 3)\r\n\r\n # step 5\r\n def remove_jsonfile(self):\r\n \"\"\"Remove the jsonfile.\"\"\"\r\n try:\r\n os.remove(self.jsonfile)\r\n print(f\"\\033[1;34mFile '{self.jsonfile}' has been deleted.\\033[0m\")\r\n print(\"-\" * 3)\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n print(\"*\" * 6)\r\n print(f\"\\033[91mERROR:File '{self.jsonfile}' not found. No file was deleted.\\033[0m\")\r\n print(\"*\" * 6)\r"
}
] | from command_args import CommandArgs
from parameter_reader import ParameterReader
from process_file import ProcessFile
| 9,055 |
class TranslateText:
"""Overall class to manage text translation."""
def __init__(self):
"""Initialize parameters about the text translation."""
# 1. Read the command line arguments.
self.commandArgs = CommandArgs()
        # 2. Read the parameters from the settings.cfg file and the .env file,
        # and process the parameters.
|
class TranslateText:
"""Overall class to manage text translation."""
def __init__(self):
"""Initialize parameters about the text translation."""
# 1. Read the command line arguments.
self.commandArgs = CommandArgs()
        # 2. Read the parameters from the settings.cfg file and the .env file,
        # and process the parameters.
| self.parameterReader = ParameterReader(self.commandArgs)
| 1 | 2023-11-10 15:56:06+00:00 | 12k |
ShipBit/wingman-ai | main.py | [
{
"identifier": "AudioRecorder",
"path": "services/audio_recorder.py",
"snippet": "class AudioRecorder(FileCreator):\n def __init__(\n self,\n app_root_dir: str,\n samplerate: int = 44100,\n channels: int = 1,\n ):\n super().__init__(app_root_dir, RECORDING_PATH)\n self.file_path = self.get_full_file_path(RECORDING_FILE)\n\n self.samplerate = samplerate\n self.is_recording = False\n self.recording = None\n\n self.recstream = sounddevice.InputStream(\n callback=self.__handle_input_stream,\n channels=channels,\n samplerate=samplerate,\n )\n\n def __handle_input_stream(self, indata, _frames, _time, _status):\n if self.is_recording:\n if self.recording is None:\n self.recording = indata.copy()\n else:\n self.recording = numpy.concatenate((self.recording, indata.copy()))\n\n def start_recording(self):\n if self.is_recording:\n return\n\n self.recstream.start()\n self.is_recording = True\n printr.print(\"Recording started\", tags=\"grey\")\n\n def stop_recording(self) -> None | str:\n self.recstream.stop()\n self.is_recording = False\n printr.print(\"Recording stopped\", tags=\"grey\")\n\n if self.recording is None:\n printr.print(\"Ignored empty recording\", tags=\"warn\")\n return None\n if (len(self.recording) / self.samplerate) < 0.15:\n printr.print(\"Recording was too short to be handled by the AI\", tags=\"warn\")\n return None\n\n try:\n soundfile.write(self.file_path, self.recording, self.samplerate)\n self.recording = None\n return self.file_path\n except IndexError:\n printr.print(\"Ignored empty recording\", tags=\"warn\")\n return None"
},
{
"identifier": "SecretKeeper",
"path": "services/secret_keeper.py",
"snippet": "class SecretKeeper:\n def __init__(self, app_root_path: str):\n self.printr = Printr()\n self.system_config_path: str = os.path.join(app_root_path, SYSTEM_CONFIG_PATH)\n self.config_file = os.path.join(self.system_config_path, SECRETS_FILE)\n self.secrets = self.__load()\n if not self.secrets:\n self.secrets = {}\n\n def __load(self) -> dict[str, any]: # type: ignore\n parsed_config = None\n\n if os.path.exists(self.config_file) and os.path.isfile(self.config_file):\n with open(self.config_file, \"r\", encoding=\"UTF-8\") as stream:\n try:\n parsed_config = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not load ({SECRETS_FILE})\\n{str(e)}\", True\n )\n\n return parsed_config\n\n def save(self):\n \"\"\"Write all secrets to the file\"\"\"\n with open(self.config_file, \"w\", encoding=\"UTF-8\") as stream:\n try:\n yaml.dump(self.secrets, stream)\n return True\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not write ({SECRETS_FILE})\\n{str(e)}\", True\n )\n return False\n\n def retrieve(\n self,\n requester: str,\n key: str,\n friendly_key_name: str,\n prompt_if_missing: bool = True,\n ) -> str:\n \"\"\"Retrieve secret a secret and optionally prompt user for it if missing\"\"\"\n\n secret = self.secrets.get(key, None)\n if not secret and prompt_if_missing:\n # Prompt user for key\n dialog = ctk.CTkInputDialog(\n text=f\"Please enter '{friendly_key_name}':\",\n title=f\"{requester} needs to know a secret\",\n )\n secret = dialog.get_input()\n if secret:\n secret = secret.strip().replace(\"\\n\", \"\")\n self.secrets[key] = secret\n self.save()\n\n return secret"
},
{
"identifier": "Tower",
"path": "services/tower.py",
"snippet": "class Tower:\n def __init__(self, config: dict[str, any], secret_keeper: SecretKeeper, app_root_dir: str): # type: ignore\n self.config = config\n self.app_root_dir = app_root_dir\n self.secret_keeper = secret_keeper\n self.key_wingman_dict: dict[str, Wingman] = {}\n self.broken_wingmen = []\n\n self.wingmen = self.__instantiate_wingmen()\n self.key_wingman_dict: dict[str, Wingman] = {}\n for wingman in self.wingmen:\n self.key_wingman_dict[wingman.get_record_key()] = wingman\n\n def __instantiate_wingmen(self) -> list[Wingman]:\n wingmen = []\n for wingman_name, wingman_config in self.config[\"wingmen\"].items():\n if wingman_config.get(\"disabled\") is True:\n continue\n\n global_config = {\n \"sound\": self.config.get(\"sound\", {}),\n \"openai\": self.config.get(\"openai\", {}),\n \"features\": self.config.get(\"features\", {}),\n \"edge_tts\": self.config.get(\"edge_tts\", {}),\n \"commands\": self.config.get(\"commands\", {}),\n \"elevenlabs\": self.config.get(\"elevenlabs\", {}),\n \"azure\": self.config.get(\"azure\", {}),\n }\n merged_config = self.__merge_configs(global_config, wingman_config)\n class_config = merged_config.get(\"class\")\n\n wingman = None\n # it's a custom Wingman\n try:\n if class_config:\n kwargs = class_config.get(\"args\", {})\n wingman = Wingman.create_dynamically(\n name=wingman_name,\n config=merged_config,\n secret_keeper=self.secret_keeper,\n module_path=class_config.get(\"module\"),\n class_name=class_config.get(\"name\"),\n app_root_dir=self.app_root_dir,\n **kwargs\n )\n else:\n wingman = OpenAiWingman(\n name=wingman_name,\n config=merged_config,\n secret_keeper=self.secret_keeper,\n app_root_dir=self.app_root_dir,\n )\n except MissingApiKeyException:\n self.broken_wingmen.append(\n {\n \"name\": wingman_name,\n \"error\": \"Missing API key. 
Please check your key config.\",\n }\n )\n except Exception as e: # pylint: disable=broad-except\n # just in case we missed something\n msg = str(e).strip()\n if not msg:\n msg = type(e).__name__\n self.broken_wingmen.append({\"name\": wingman_name, \"error\": msg})\n else:\n # additional validation check if no exception was raised\n errors = wingman.validate()\n if not errors or len(errors) == 0:\n wingman.prepare()\n wingmen.append(wingman)\n else:\n self.broken_wingmen.append(\n {\"name\": wingman_name, \"error\": \", \".join(errors)}\n )\n\n return wingmen\n\n def get_wingman_from_key(self, key: any) -> Wingman | None: # type: ignore\n if hasattr(key, \"char\"):\n wingman = self.key_wingman_dict.get(key.char, None)\n else:\n wingman = self.key_wingman_dict.get(key.name, None)\n return wingman\n\n def get_wingmen(self):\n return self.wingmen\n\n def get_broken_wingmen(self):\n return self.broken_wingmen\n\n def get_config(self):\n return self.config\n\n def __deep_merge(self, source, updates):\n \"\"\"Recursively merges updates into source.\"\"\"\n for key, value in updates.items():\n if isinstance(value, dict):\n node = source.setdefault(key, {})\n self.__deep_merge(node, value)\n else:\n source[key] = value\n return source\n\n def __merge_command_lists(self, general_commands, wingman_commands):\n \"\"\"Merge two lists of commands, where wingman-specific commands override or get added based on the 'name' key.\"\"\"\n # Use a dictionary to ensure unique names and allow easy overrides\n merged_commands = {cmd[\"name\"]: cmd for cmd in general_commands}\n for cmd in wingman_commands:\n merged_commands[\n cmd[\"name\"]\n ] = cmd # Will override or add the wingman-specific command\n # Convert merged commands back to a list since that's the expected format\n return list(merged_commands.values())\n\n def __merge_configs(self, general, wingman):\n \"\"\"Merge general settings with a specific wingman's overrides, including commands.\"\"\"\n # Start with a copy of the wingman's specific config to keep it intact.\n merged = wingman.copy()\n # Update 'openai', 'features', and 'edge_tts' sections from general config into wingman's config.\n for key in [\"sound\", \"openai\", \"features\", \"edge_tts\", \"elevenlabs\", \"azure\"]:\n if key in general:\n # Use copy.deepcopy to ensure a full deep copy is made and original is untouched.\n merged[key] = self.__deep_merge(\n copy.deepcopy(general[key]), wingman.get(key, {})\n )\n\n # Special handling for merging the commands lists\n if \"commands\" in general and \"commands\" in wingman:\n merged[\"commands\"] = self.__merge_command_lists(\n general[\"commands\"], wingman[\"commands\"]\n )\n elif \"commands\" in general:\n # If the wingman config does not have commands, use the general ones\n merged[\"commands\"] = general[\"commands\"]\n # No else needed; if 'commands' is not in general, we simply don't set it\n\n return merged"
},
{
"identifier": "Printr",
"path": "services/printr.py",
"snippet": "class Printr(object):\n _instance = None\n\n LILA = \"\\033[95m\"\n BLUE = \"\\033[94m\"\n CYAN = \"\\033[96m\"\n GREEN = \"\\033[92m\"\n YELLOW = \"\\033[93m\"\n RED = \"\\033[91m\"\n CLEAR = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n FAINT = \"\\033[2m\"\n NORMAL_WEIGHT = \"\\033[22m\"\n UNDERLINE = \"\\033[4m\"\n END_UNDERLINE = \"\\033[24m\"\n OVERLINE = \"\\033[53m\"\n END_OVERLINE = \"\\033[55m\"\n FRAMED = \"\\033[51m\"\n ENCIRCLED = \"\\033[52m\"\n DELETE_LINE = \"\\033[2K\\033[1G\"\n PREVIOUS_LINE = \"\\033[2F\"\n\n tags = [\n # {\"tagName\": \"bold\", \"font\": \"TkTextFont bold\"},\n {\"tagName\": \"info\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"warn\", \"foreground\": \"orange\"},\n {\"tagName\": \"err\", \"foreground\": \"red\"},\n\n {\"tagName\": \"green\", \"foreground\": \"#33cc33\"},\n {\"tagName\": \"blue\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"violet\", \"foreground\": \"#aa33dd\"},\n {\"tagName\": \"grey\", \"foreground\": \"grey\"}\n ]\n\n CHANNEL = Literal[\"main\", \"error\", \"warning\", \"info\"]\n OUTPUT_TYPES = None | ctk.StringVar | ctk.CTkTextbox\n\n _message_stacks: dict[CHANNEL, list] = dict(\n main=[],\n error=[],\n warning=[],\n info=[]\n )\n\n # NOTE this is a singleton class\n def __new__(cls):\n if cls._instance is None:\n cls._instance = super(Printr, cls).__new__(cls)\n\n cls.out: dict[Printr.CHANNEL, Printr.OUTPUT_TYPES ] = dict(\n main=None,\n error=None,\n warning=None,\n info=None\n )\n return cls._instance\n\n\n def set_output(self, output_channel: CHANNEL, output_element: OUTPUT_TYPES):\n if isinstance(output_element, ctk.CTkTextbox):\n for tag in self.tags:\n output_element.tag_config(**tag)\n\n self.out[output_channel] = output_element\n\n msg_stack = self._message_stacks.get(output_channel, [])\n if len(msg_stack) > 0:\n msg = \"\\n\".join(msg_stack)\n self.print(msg, output_channel)\n # TODO: clear stack?\n for _ in range(len(msg_stack)):\n msg_stack.pop()\n\n\n\n def print(self, text, output_channel: CHANNEL = \"main\", tags=None, wait_for_gui=False, console_only=False):\n channel = self.out.get(output_channel, None)\n if channel and not console_only:\n if isinstance(channel, ctk.CTkTextbox):\n channel.configure(state=\"normal\")\n channel.insert(\"end\", f\"{text}\\n\", tags=tags)\n channel.see(\"end\")\n channel.configure(state=\"disabled\")\n else:\n # output type -> StringVar\n channel.set(text)\n elif wait_for_gui and not console_only:\n # message should only be shown in GUI\n # so add it to the queue to wait for GUI initialization\n self._message_stacks.get(output_channel, []).append(text)\n else:\n # no special output type -> terminal output\n print(text)\n\n\n def print_err(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"error\", wait_for_gui=wait_for_gui)\n\n def print_warn(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"warning\", wait_for_gui=wait_for_gui)\n\n def print_info(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"info\", wait_for_gui=wait_for_gui)\n\n\n @staticmethod\n def clr(text, color_format):\n return f\"{color_format}{text}{Printr.CLEAR}\"\n\n @staticmethod\n def clr_print(text, color_format):\n print(Printr.clr(text, color_format))\n\n @staticmethod\n def sys_print(text, headline=\"\", color=RED, first_message=True):\n if first_message:\n print(\"\")\n if headline.strip():\n print(\n Printr.clr(f\"{Printr.BOLD}{headline}{Printr.NORMAL_WEIGHT}\", color)\n )\n else:\n print(Printr.PREVIOUS_LINE)\n print(Printr.clr(f\"⎢ {text}\", 
color))\n print(\"\")\n\n @staticmethod\n def err_print(text, first_message=True):\n Printr.sys_print(text, \"Something went wrong!\", first_message=first_message)\n\n @staticmethod\n def warn_print(text, first_message=True):\n Printr.sys_print(text, \"Please note:\", Printr.YELLOW, first_message)\n\n @staticmethod\n def info_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.BLUE, first_message)\n\n @staticmethod\n def hl_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.CYAN, first_message)\n\n @staticmethod\n def override_print(text):\n print(f\"{Printr.DELETE_LINE}{text}\")\n\n @staticmethod\n def box_start():\n print(\n f\"{Printr.CYAN}⎡{Printr.OVERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_OVERLINE}⎤\"\n )\n print(f\"⎢{Printr.CLEAR}\")\n\n @staticmethod\n def box_end():\n print(f\"{Printr.CYAN}⎢\")\n print(\n f\"⎣{Printr.UNDERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_UNDERLINE}⎦{Printr.CLEAR}\"\n )\n\n @staticmethod\n def box_print(text):\n print(f\"{Printr.CYAN}⎜{Printr.CLEAR} {text}\")"
},
{
"identifier": "ConfigManager",
"path": "services/config_manager.py",
"snippet": "class ConfigManager:\n def __init__(self, app_root_path: str, app_is_bundled: bool):\n self.printr = Printr()\n self.gui_config = {}\n self.contexts = [\"\"]\n self.context_config_path: str = os.path.join(\n app_root_path,\n CONTEXT_CONFIG_PATH_BUNDLED if app_is_bundled else CONTEXT_CONFIG_PATH,\n )\n if not os.path.exists(self.context_config_path):\n os.makedirs(self.context_config_path)\n self.system_config_path: str = os.path.join(app_root_path, SYSTEM_CONFIG_PATH)\n self.load_gui_config()\n self.load_context_config_names()\n\n def __read_config_file(self, config_name, is_system_config=True) -> dict[str, any]: # type: ignore\n parsed_config = {}\n\n path = self.system_config_path if is_system_config else self.context_config_path\n config_file = os.path.join(path, config_name)\n if os.path.exists(config_file) and os.path.isfile(config_file):\n with open(config_file, \"r\", encoding=\"UTF-8\") as stream:\n try:\n parsed_config = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not load config ({config_name})!\\n{str(e)}\", True\n )\n\n return parsed_config\n\n def __write_config_file(self, config_name, content, is_system_config=True) -> bool: # type: ignore\n path = self.system_config_path if is_system_config else self.context_config_path\n config_file = os.path.join(path, config_name)\n with open(config_file, \"w\", encoding=\"UTF-8\") as stream:\n try:\n yaml.dump(content, stream)\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not write config ({config_name})!\\n{str(e)}\", True\n )\n return False\n\n return True\n\n def load_gui_config(self):\n \"\"\"Fetch GUI config from file and store it for future use\"\"\"\n self.gui_config = self.__read_config_file(GUI_CONFIG)\n return self.gui_config\n\n def save_gui_config(self):\n \"\"\"Write GUI config to file\"\"\"\n return self.__write_config_file(GUI_CONFIG, self.gui_config)\n\n def load_context_config_names(self):\n default_found = False\n file_prefix, file_ending = DEFAULT_CONTEXT_CONFIG.split(\".\")\n\n # Dynamically load all user configuration files from the provided directory\n for file in os.listdir(self.context_config_path):\n # Filter out all non-yaml files\n if file.endswith(f\".{file_ending}\") and file.startswith(f\"{file_prefix}.\"):\n if file == DEFAULT_CONTEXT_CONFIG:\n default_found = True\n else:\n config_name = file.replace(f\"{file_prefix}.\", \"\").replace(\n f\".{file_ending}\", \"\"\n )\n self.contexts.append(config_name)\n\n if not default_found:\n # create default context from the systems example context config\n example_context: str = os.path.join(\n self.system_config_path, EXAMPLE_CONTEXT_CONFIG\n )\n default_context: str = os.path.join(\n self.context_config_path, DEFAULT_CONTEXT_CONFIG\n )\n if os.path.exists(example_context) and os.path.isfile(example_context):\n shutil.copyfile(example_context, default_context)\n\n def get_context_config(self, context=\"\") -> dict[str, any]: # type: ignore\n # default name -> 'config.yaml'\n # context config -> 'config.{context}.yaml'\n file_name = f\"config.{f'{context}.' if context else ''}yaml\"\n config = self.__read_config_file(file_name, False)\n return config"
},
{
"identifier": "WingmanUI",
"path": "gui/root.py",
"snippet": "class WingmanUI(ctk.CTk):\n VIEWS = Literal[\"context\", \"settings\", \"about\"]\n _views: dict[VIEWS, ctk.CTkFrame | None] = dict(\n context=None, settings=None, about=None\n )\n\n def __init__(self, core):\n super().__init__()\n self.core = core\n\n self.about_window = None\n\n ctk.set_appearance_mode(\n self.core.config_manager.gui_config.get(\"appearance\", \"system\")\n )\n # TODO: add themes\n # ctk.set_default_color_theme(path.join(self.core.app_root_dir, \"assets\", \"themes\", \"wingman-ai.json\"))\n\n self.title(\"Wingman AI\")\n self.geometry(\"1024x800+200+150\")\n self.minsize(400, 150)\n # no way to set this on MacOS\n self.iconbitmap(path.join(self.core.app_root_dir, \"assets\", \"wingman-ai.ico\"))\n\n if platform == \"darwin\":\n mac_dock_icon = tk.Image(\n \"photo\",\n file=path.join(\n self.core.app_root_dir, \"assets\", \"icons\", \"wingman-ai.png\"\n ),\n )\n self.iconphoto(True, mac_dock_icon)\n self.menubar = tk.Menu(self)\n self.system_menu = tk.Menu(self.menubar, name=\"apple\")\n self.system_menu.add_command(label=\"Exit Wingman AI\", command=self.quit)\n self.menubar.add_cascade(label=\"System\", menu=self.system_menu)\n self.help_menu = tk.Menu(self.menubar, tearoff=0)\n self.help_menu.add_command(\n label=\"About Wingman AI\", command=lambda: self.show_view(\"about\")\n )\n self.menubar.add_cascade(label=\"Help\", menu=self.help_menu)\n self.config(menu=self.menubar)\n\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(1, weight=1)\n\n self.header = Header(self, height=74, corner_radius=0)\n self.header.grid(row=0, column=0, sticky=\"we\")\n\n view_grid = {\"row\": 1, \"column\": 0, \"sticky\": \"nesw\"}\n self._views[\"about\"] = AboutView(self, corner_radius=0, fg_color=\"transparent\")\n self._views[\"about\"].grid(**view_grid)\n\n self._views[\"settings\"] = SettingsView(\n self, corner_radius=0, fg_color=\"transparent\"\n )\n self._views[\"settings\"].grid(**view_grid)\n\n self._views[\"context\"] = ContextView(\n self, corner_radius=0, fg_color=\"transparent\"\n )\n self._views[\"context\"].grid(**view_grid)\n\n self.notification_banner = NotificationBanner(self, corner_radius=0)\n self.notification_banner.set_grid_position(row=2, column=0)\n\n def switch_view(self, view: VIEWS, show=True):\n toggle_view = self._views.get(view)\n if isinstance(toggle_view, ctk.CTkFrame):\n if show:\n toggle_view.tkraise()\n else:\n toggle_view.lower()\n\n def show_view(self, view: VIEWS):\n self.switch_view(view, show=True)\n\n def hide_view(self, view: VIEWS):\n self.switch_view(view, show=False)"
},
{
"identifier": "Wingman",
"path": "wingmen/wingman.py",
"snippet": "class Wingman(FileCreator):\n \"\"\"The \"highest\" Wingman base class in the chain. It does some very basic things but is meant to be 'virtual', and so are most its methods, so you'll probably never instantiate it directly.\n\n Instead, you'll create a custom wingman that inherits from this (or a another subclass of it) and override its methods if needed.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n config: dict[str, Any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n ):\n \"\"\"The constructor of the Wingman class. You can override it in your custom wingman.\n\n Args:\n name (str): The name of the wingman. This is the key you gave it in the config, e.g. \"atc\"\n config (dict[str, any]): All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\n app_root_dir (str): The path to the root directory of the app. This is where the Wingman executable lives.\n \"\"\"\n\n super().__init__(app_root_dir=app_root_dir, subdir=\"wingman_data\")\n\n self.config = config\n \"\"\"All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\"\"\"\n\n self.secret_keeper = secret_keeper\n \"\"\"A service that allows you to store and retrieve secrets like API keys. It can prompt the user for secrets if necessary.\"\"\"\n\n self.name = name\n \"\"\"The name of the wingman. This is the key you gave it in the config, e.g. \"atc\".\"\"\"\n\n self.audio_player = AudioPlayer()\n \"\"\"A service that allows you to play audio files and add sound effects to them.\"\"\"\n\n self.execution_start: None | float = None\n \"\"\"Used for benchmarking executon times. The timer is (re-)started whenever the process function starts.\"\"\"\n\n self.debug: bool = self.config[\"features\"].get(\"debug_mode\", False)\n \"\"\"If enabled, the Wingman will skip executing any keypresses. It will also print more debug messages and benchmark results.\"\"\"\n\n self.tts_provider = self.config[\"features\"].get(\"tts_provider\")\n \"\"\"The name of the TTS provider you configured in the config.yaml\"\"\"\n\n self.app_root_dir = app_root_dir\n \"\"\"The path to the root directory of the app. This is where the Wingman executable lives.\"\"\"\n\n @staticmethod\n def create_dynamically(\n module_path: str,\n class_name: str,\n name: str,\n config: dict[str, Any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n **kwargs,\n ):\n \"\"\"Dynamically creates a Wingman instance from a module path and class name\n\n Args:\n module_path (str): The module path, e.g. wingmen.open_ai_wingman. It's like the filepath from root to your custom-wingman.py but with dots instead of slashes and without the .py extension. Case-sensitive!\n class_name (str): The name of the class inside your custom-wingman.py, e.g. OpenAiWingman. Case-sensitive!\n name (str): The name of the wingman. This is the key you gave it in the config, e.g. \"atc\"\n config (dict[str, any]): All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. 
You can just add new keys to the config and they will be available here.\n \"\"\"\n\n module = import_module(module_path)\n DerivedWingmanClass = getattr(module, class_name)\n instance = DerivedWingmanClass(\n name=name,\n config=config,\n secret_keeper=secret_keeper,\n app_root_dir=app_root_dir,\n **kwargs,\n )\n return instance\n\n def get_record_key(self) -> str:\n \"\"\"Returns the activation or \"push-to-talk\" key for this Wingman.\"\"\"\n return self.config.get(\"record_key\", None)\n\n def print_execution_time(self, reset_timer=False):\n \"\"\"Prints the current time since the execution started (in seconds).\"\"\"\n if self.execution_start:\n execution_stop = time.perf_counter()\n elapsed_seconds = execution_stop - self.execution_start\n printr.print(f\"...took {elapsed_seconds:.2f}s\", tags=\"info\")\n if reset_timer:\n self.start_execution_benchmark()\n\n def start_execution_benchmark(self):\n \"\"\"Starts the execution benchmark timer.\"\"\"\n self.execution_start = time.perf_counter()\n\n # ──────────────────────────────────── Hooks ─────────────────────────────────── #\n\n def validate(self) -> list[str]:\n \"\"\"Use this function to validate params and config before the Wingman is started.\n If you add new config sections or entries to your custom wingman, you should validate them here.\n\n It's a good idea to collect all errors from the base class and not to swallow them first.\n\n If you return errors, your Wingman will be disabled by Tower and not be loaded.\n\n Returns:\n list[str]: A list of error messages or an empty list if everything is okay.\n \"\"\"\n return []\n\n # TODO: this should be async\n def prepare(self):\n \"\"\"This method is called only once when the Wingman is instantiated by Tower.\n It is run AFTER validate() so you can access validated params safely here.\n\n You can override it if you need to load async data from an API or file.\"\"\"\n pass\n\n def reset_conversation_history(self):\n \"\"\"This function is called when the user triggers the ResetConversationHistory command.\n It's a global command that should be implemented by every Wingman that keeps a message history.\n \"\"\"\n\n # ──────────────────────────── The main processing loop ──────────────────────────── #\n\n async def process(self, audio_input_wav: str):\n \"\"\"The main method that gets called when the wingman is activated. This method controls what your wingman actually does and you can override it if you want to.\n\n The base implementation here triggers the transcription and processing of the given audio input.\n If you don't need even transcription, you can just override this entire process method. If you want transcription but then do something in addition, you can override the listed hooks.\n\n Async so you can do async processing, e.g. send a request to an API.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. This is a recording of what you you said.\n\n Hooks:\n - async _transcribe: transcribe the audio to text\n - async _get_response_for_transcript: process the transcript and return a text response\n - async _play_to_user: do something with the response, e.g. 
play it as audio\n \"\"\"\n\n self.start_execution_benchmark()\n\n process_result = None\n\n if self.debug:\n printr.print(\"Starting transcription...\", tags=\"info\")\n\n # transcribe the audio.\n transcript, locale = await self._transcribe(audio_input_wav)\n\n if self.debug:\n self.print_execution_time(reset_timer=True)\n\n if transcript:\n printr.print(f\">> (You): {transcript}\", tags=\"violet\")\n\n if self.debug:\n printr.print(\"Getting response for transcript...\", tags=\"info\")\n\n # process the transcript further. This is where you can do your magic. Return a string that is the \"answer\" to your passed transcript.\n process_result, instant_response = await self._get_response_for_transcript(\n transcript, locale\n )\n\n if self.debug:\n self.print_execution_time(reset_timer=True)\n\n actual_response = instant_response or process_result\n printr.print(f\"<< ({self.name}): {actual_response}\", tags=\"green\")\n\n if self.debug:\n printr.print(\"Playing response back to user...\", tags=\"info\")\n\n # the last step in the chain. You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.\n await self._play_to_user(str(process_result))\n\n if self.debug:\n self.print_execution_time()\n\n # ───────────────── virtual methods / hooks ───────────────── #\n\n async def _transcribe(self, audio_input_wav: str) -> tuple[str | None, str | None]:\n \"\"\"Transcribes the audio to text. You can override this method if you want to use a different transcription service.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. This is a recording of what you you said.\n\n Returns:\n tuple[str | None, str | None]: The transcript of the audio file and the detected language as locale (if determined).\n \"\"\"\n return None, None\n\n async def _get_response_for_transcript(\n self, transcript: str, locale: str | None\n ) -> tuple[str, str]:\n \"\"\"Processes the transcript and return a response as text. This where you'll do most of your work.\n Pass the transcript to AI providers and build a conversation. Call commands or APIs. Play temporary results to the user etc.\n\n\n Args:\n transcript (str): The user's spoken text transcribed as text.\n locale (str | None): The language that was detected to be used in the transcript, e.g. \"de-DE\".\n\n Returns:\n A tuple of strings representing the response to a function call and/or an instant response.\n \"\"\"\n return (\"\", \"\")\n\n async def _play_to_user(self, text: str):\n \"\"\"You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.\n\n Args:\n text (str): The response of your _get_response_for_transcript. This is usually the \"response\" from conversation with the AI.\n \"\"\"\n pass\n\n # ───────────────────────────────── Commands ─────────────────────────────── #\n\n def _get_command(self, command_name: str) -> dict | None:\n \"\"\"Extracts the command with the given name\n\n Args:\n command_name (str): the name of the command you used in the config\n\n Returns:\n {}: The command object from the config\n \"\"\"\n\n command = next(\n (\n item\n for item in self.config.get(\"commands\", [])\n if item[\"name\"] == command_name\n ),\n None,\n )\n return command\n\n def _select_command_response(self, command: dict) -> str | None:\n \"\"\"Returns one of the configured responses of the command. 
This base implementation returns a random one.\n\n Args:\n command (dict): The command object from the config\n\n Returns:\n str: A random response from the command's responses list in the config.\n \"\"\"\n command_responses = command.get(\"responses\", None)\n if (command_responses is None) or (len(command_responses) == 0):\n return None\n\n return random.choice(command_responses)\n\n def _execute_instant_activation_command(self, transcript: str) -> dict | None:\n \"\"\"Uses a fuzzy string matching algorithm to match the transcript to a configured instant_activation command and executes it immediately.\n\n Args:\n transcript (text): What the user said, transcripted to text. Needs to be similar to one of the defined instant_activation phrases to work.\n\n Returns:\n {} | None: The executed instant_activation command.\n \"\"\"\n\n instant_activation_commands = [\n command\n for command in self.config.get(\"commands\", [])\n if command.get(\"instant_activation\")\n ]\n\n # check if transcript matches any instant activation command. Each command has a list of possible phrases\n for command in instant_activation_commands:\n for phrase in command.get(\"instant_activation\"):\n ratio = SequenceMatcher(\n None,\n transcript.lower(),\n phrase.lower(),\n ).ratio()\n if (\n ratio > 0.8\n ): # if the ratio is higher than 0.8, we assume that the command was spoken\n self._execute_command(command)\n\n if command.get(\"responses\"):\n return command\n return None\n return None\n\n def _execute_command(self, command: dict) -> str:\n \"\"\"Triggers the execution of a command. This base implementation executes the keypresses defined in the command.\n\n Args:\n command (dict): The command object from the config to execute\n\n Returns:\n str: the selected response from the command's responses list in the config. \"Ok\" if there are none.\n \"\"\"\n\n if not command:\n return \"Command not found\"\n\n printr.print(f\"❖ Executing command: {command.get('name')}\", tags=\"info\")\n\n if self.debug:\n printr.print(\n \"Skipping actual keypress execution in debug_mode...\", tags=\"warn\"\n )\n\n if len(command.get(\"keys\", [])) > 0 and not self.debug:\n self.execute_keypress(command)\n # TODO: we could do mouse_events here, too...\n\n # handle the global special commands:\n if command.get(\"name\", None) == \"ResetConversationHistory\":\n self.reset_conversation_history()\n\n if not self.debug:\n # in debug mode we already printed the separate execution times\n self.print_execution_time()\n\n return self._select_command_response(command) or \"Ok\"\n\n def execute_keypress(self, command: dict):\n \"\"\"Executes the keypresses defined in the command in order.\n\n pydirectinput uses SIGEVENTS to send keypresses to the OS. This lib seems to be the only way to send keypresses to games reliably.\n\n It only works on Windows. For MacOS, we fall back to PyAutoGUI (which has the exact same API as pydirectinput is built on top of it).\n\n Args:\n command (dict): The command object from the config to execute\n \"\"\"\n\n for entry in command.get(\"keys\", []):\n if entry.get(\"modifier\"):\n key_module.keyDown(entry[\"modifier\"])\n\n if entry.get(\"hold\"):\n key_module.keyDown(entry[\"key\"])\n time.sleep(entry[\"hold\"])\n key_module.keyUp(entry[\"key\"])\n else:\n key_module.press(entry[\"key\"])\n\n if entry.get(\"modifier\"):\n key_module.keyUp(entry[\"modifier\"])\n\n if entry.get(\"wait\"):\n time.sleep(entry[\"wait\"])"
}
] | from os import path
from pynput import keyboard
from services.audio_recorder import AudioRecorder
from services.secret_keeper import SecretKeeper
from services.tower import Tower
from services.printr import Printr
from services.config_manager import ConfigManager
from gui.root import WingmanUI
from wingmen.wingman import Wingman
import sys
import asyncio
import threading | 9,794 |
printr = Printr()
def get_application_root(is_bundled: bool):
if is_bundled:
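        # when bundled by PyInstaller, sys._MEIPASS points to the unpacked application directory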
application_path = sys._MEIPASS
else:
application_path = path.dirname(path.abspath(__file__))
return application_path
class WingmanAI:
def __init__(self):
        # PyInstaller sets sys.frozen when running from a bundled executable
self.app_is_bundled = getattr(sys, "frozen", False)
self.app_root_dir = get_application_root(self.app_is_bundled)
self.active = False
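        # the push-to-talk key currently being recorded and the Wingman bound to it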
self.active_recording = {"key": "", "wingman": None}
self.tower = None
self.config_manager = ConfigManager(self.app_root_dir, self.app_is_bundled)
self.secret_keeper = SecretKeeper(self.app_root_dir)
self.audio_recorder = AudioRecorder(self.app_root_dir)
def load_context(self, context=""):
self.active = False
try:
if self.config_manager:
config = self.config_manager.get_context_config(context)
|
printr = Printr()
def get_application_root(is_bundled: bool):
if is_bundled:
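        # when bundled by PyInstaller, sys._MEIPASS points to the unpacked application directory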
application_path = sys._MEIPASS
else:
application_path = path.dirname(path.abspath(__file__))
return application_path
class WingmanAI:
def __init__(self):
        # PyInstaller sets sys.frozen when running from a bundled executable
self.app_is_bundled = getattr(sys, "frozen", False)
self.app_root_dir = get_application_root(self.app_is_bundled)
self.active = False
self.active_recording = {"key": "", "wingman": None}
self.tower = None
self.config_manager = ConfigManager(self.app_root_dir, self.app_is_bundled)
self.secret_keeper = SecretKeeper(self.app_root_dir)
self.audio_recorder = AudioRecorder(self.app_root_dir)
def load_context(self, context=""):
self.active = False
try:
if self.config_manager:
config = self.config_manager.get_context_config(context) | self.tower = Tower( | 2 | 2023-11-15 09:36:06+00:00 | 12k |
derkalle4/python3-idotmatrix-client | core/cmd.py | [
{
"identifier": "Bluetooth",
"path": "core/bluetooth.py",
"snippet": "class Bluetooth:\n address = None\n client = None\n logging = logging.getLogger(\"idotmatrix.\" + __name__)\n mtu_size = None\n\n def __init__(self, address):\n self.logging.debug(\"initialize bluetooth for {}\".format(address))\n self.address = address\n\n async def response_handler(self, sender, data):\n \"\"\"Simple response handler which prints the data received.\"\"\"\n self.logging.debug(\"device feedback: {}\".format(list(data)))\n\n async def connect(self):\n self.logging.info(\"connecting to device\")\n try:\n # create client\n self.client = BleakClient(self.address)\n # connect client\n await self.client.connect()\n # get mtu size\n gatt_characteristic = self.client.services.get_characteristic(\n UUID_WRITE_DATA\n )\n self.mtu_size = gatt_characteristic.max_write_without_response_size\n # Initialise Response Message Handler\n await self.client.start_notify(UUID_READ_DATA, self.response_handler)\n except Exception as e:\n self.logging.error(e)\n if self.client.is_connected:\n self.disconnect()\n return False\n return True\n\n async def disconnect(self):\n self.logging.info(\"disconnecting from device\")\n if self.client is not None:\n await self.client.stop_notify(UUID_READ_DATA)\n await self.client.disconnect()\n\n def splitIntoMultipleLists(self, data):\n \"\"\"\n Returns a list containing lists with the elements from `data`.\n It is ensured that the lists have a maximum length of `max_elems_per_list`.\n\n Derived from `private List<byte[]> getSendData4096(byte[] bArr)`\n in `com/tech/idotmatrix/core/data/ImageAgreement1.java:259`.\n \"\"\"\n chunks = []\n len_ = len(data)\n for start in range(0, len_, self.mtu_size):\n end = start + min(len_ - start, self.mtu_size)\n chunks.append(data[start:end])\n return chunks\n\n async def send(self, message):\n # check if connected\n if self.client is None or not self.client.is_connected:\n if not await self.connect():\n return False\n self.logging.debug(\"sending message(s) to device\")\n for data in self.splitIntoMultipleLists(message):\n self.logging.debug(\"trying to send {}\".format(data))\n await self.client.write_gatt_char(\n UUID_WRITE_DATA,\n data,\n )\n time.sleep(0.01)\n return True"
},
{
"identifier": "Chronograph",
"path": "core/idotmatrix/chronograph.py",
"snippet": "class Chronograph:\n def setChronograph(self, mode):\n \"\"\"Starts/Stops the Chronograph.\n\n Args:\n mode (int): 0 = reset, 1 = (re)start, 2 = pause, 3 = continue after pause\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 9,\n 128,\n int(mode) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the chronograph: {}\".format(error))"
},
{
"identifier": "Clock",
"path": "core/idotmatrix/clock.py",
"snippet": "class Clock:\n \"\"\"This class contains the management of the iDotMatrix clock.\n Based on the BleProtocolN.java file of the iDotMatrix Android App.\n \"\"\"\n\n def setTimeIndicator(self, enabled=True):\n \"\"\"Sets the time indicator of the clock. Does not seem to work currently (maybe in a future update?).\n It is inside the source code of BleProtocolN.java, but not referenced anywhere.\n\n Args:\n enabled (bool, optional): Whether or not to show the time indicator of the clock. Defaults to True.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 7,\n 128,\n 1 if enabled else 0,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the time indicator: {}\".format(error))\n\n def setClockMode(self, style, visibleDate, hour24, r=255, g=255, b=255):\n \"\"\"set the clock mode of the device\n\n Args:\n style (int): style of the clock\n 0 = default\n 1 = christmas\n 2 = racing\n 3 = inverted full screen\n 4 = animated hourglass\n 5 = frame 1\n 6 = frame 2\n 7 = frame 3\n visibleDate (bool): whether the date should be shown ornot\n hour24 (bool): 12 or 24 hour format\n r (int, optional): color red. Defaults to 255.\n g (int, optional): color green. Defaults to 255.\n b (int, optional): color blue. Defaults to 255.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 8,\n 0,\n 6,\n 1,\n (style | (128 if visibleDate else 0)) | (64 if hour24 else 0),\n int(r) % 256,\n int(g) % 256,\n int(b) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the clock mode: {}\".format(error))"
},
{
"identifier": "Common",
"path": "core/idotmatrix/common.py",
"snippet": "class Common:\n \"\"\"This class contains generic bluetooth functions for the iDotMatrix.\n Based on the BleProtocolN.java file of the iDotMatrix Android App.\n \"\"\"\n\n def toggleScreenFreeze(self):\n \"\"\"Freezes or unfreezes the screen.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n return bytearray([4, 0, 3, 0])\n\n def rotate180degrees(self, type=0):\n \"\"\"rotates the screen 180 dregrees\n\n Args:\n type (int): 0 = normal, 1 = rotated. Defaults to 0.\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 6,\n 128,\n int(type) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not rotate the screen of the device: {}\".format(error))\n\n def set_screen_brightness(self, brightness_percent: int) -> None:\n \"\"\"Set screen brightness. Range 5-100 (%)\n\n Args:\n brightness_percent (int): set the brightness in percent\n\n Returns:\n None\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 4,\n 128,\n int(brightness_percent) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the brightness of the screen: {}\".format(error))\n \n def setSpeed(self, speed):\n \"\"\"Sets the speed of ? - not referenced anyhwere in the iDotrMatrix Android App.\n\n Args:\n speed (int): set the speed\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 3,\n 1,\n int(speed) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not change the speed of the device: {}\".format(error))\n\n def setTime(self, year, month, day, hour, minute, second):\n \"\"\"Sets the date and time of the device.\n\n Args:\n year (int): year (4 digits)\n month (int): month\n day (int): day\n hour (int): hour\n minute (int): minute\n second (int): second\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n date = datetime(year, month, day, hour, minute, second)\n return bytearray(\n [\n 11,\n 0,\n 1,\n 128,\n int(year) % 256,\n int(month) % 256,\n int(day) % 256,\n int(int(date.weekday()) + 1) % 256,\n int(hour) % 256,\n int(minute) % 256,\n int(second) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the time of the device: {}\".format(error))\n\n def setJoint(self, mode):\n \"\"\"Currently no Idea what this is doing.\n\n Args:\n mode (int): set the joint mode\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 12,\n 128,\n int(mode) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not change the device joint: {}\".format(error))\n \n def set_password(self, password: int) -> None:\n \"\"\"Setting password: 6 digits in range 000000..999999. Reset device to clear\n\n Args:\n password (int): password\n Returns:\n None\n \"\"\"\n \n pwd_high = password // 10000\n pwd_mid = password % 10000 // 100\n pwd_low = password % 100\n \n try:\n return bytearray(\n [\n 8,\n 0,\n 4,\n 2,\n 1,\n pwd_high,\n pwd_mid,\n pwd_low,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the password: {}\".format(error))"
},
{
"identifier": "Countdown",
"path": "core/idotmatrix/countdown.py",
"snippet": "class Countdown:\n \"\"\"This class contains the management of the Countdown of the iDotMatrix device.\"\"\"\n\n def setCountdown(self, mode, minutes, seconds):\n \"\"\"Sets the countdown (and activates or disables it)\n\n Args:\n mode (int): mode of the countdown. 0 = disable, 1 = start, 2 = pause, 3 = restart\n minutes (int): minutes to count down from\n seconds (int): seconds to count down from\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 7,\n 0,\n 8,\n 128,\n int(mode) % 256,\n int(minutes) % 256,\n int(seconds) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the countdown: {}\".format(error))"
},
{
"identifier": "Gif",
"path": "core/idotmatrix/gif.py",
"snippet": "class Gif:\n logging = logging.getLogger(\"idotmatrix.\" + __name__)\n\n def load_gif(self, file_path):\n \"\"\"Load a gif file into a byte buffer.\n\n Args:\n file_path (str): path to file\n\n Returns:\n file: returns the file contents\n \"\"\"\n with open(file_path, \"rb\") as file:\n return file.read()\n\n def split_into_chunks(self, data, chunk_size):\n \"\"\"Split the data into chunks of specified size.\n\n Args:\n data (bytearray): data to split into chunks\n chunk_size (int): size of the chunks\n\n Returns:\n list: returns list with chunks of given data input\n \"\"\"\n return [data[i : i + chunk_size] for i in range(0, len(data), chunk_size)]\n\n def create_payloads(self, gif_data):\n \"\"\"Creates payloads from a GIF file.\n\n Args:\n gif_data (bytearray): data of the gif file\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n # TODO: make this function look more nicely :)\n # Calculate CRC of the GIF data\n crc = zlib.crc32(gif_data)\n # header for gif\n header = bytearray(\n [\n 255,\n 255,\n 1,\n 0,\n 0,\n 255,\n 255,\n 255,\n 255,\n 255,\n 255,\n 255,\n 255,\n 5,\n 0,\n 13,\n ]\n )\n # set length\n header[5:9] = int(len(gif_data) + len(header)).to_bytes(4, byteorder=\"little\")\n # add crc\n header[9:13] = crc.to_bytes(4, byteorder=\"little\")\n # Split the GIF data into 4096-byte chunks\n gif_chunks = self.split_into_chunks(gif_data, 4096)\n # build data\n payloads = bytearray()\n for i, chunk in enumerate(gif_chunks):\n header[4] = 2 if i > 0 else 0\n chunk_len = len(chunk) + len(header)\n header[0:2] = chunk_len.to_bytes(2, byteorder=\"little\")\n payloads.extend(header + chunk)\n return payloads\n\n def upload_unprocessed(self, file_path):\n \"\"\"uploads an image without further checks and resizes.\n\n Args:\n file_path (str): path to the image file\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n gif_data = self.load_gif(file_path)\n return self.create_payloads(gif_data)\n\n def upload_processed(self, file_path, pixel_size=32):\n \"\"\"uploads a file processed to make sure everything is correct before uploading to the device.\n\n Args:\n file_path (str): path to the image file\n pixel_size (int, optional): amount of pixels (either 16 or 32 makes sense). Defaults to 32.\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n try:\n # Open the gif file\n with PilImage.open(file_path) as img:\n # resize each frame of the gif\n frames = []\n try:\n while True:\n frame = img.copy()\n if frame.size != (pixel_size, pixel_size):\n # Resize the current frame\n frame = frame.resize(\n (pixel_size, pixel_size), PilImage.Resampling.NEAREST\n )\n # Copy the frame and append it to the list\n frames.append(frame.copy())\n # Move to the next frame\n img.seek(img.tell() + 1)\n except EOFError:\n pass # End of sequence\n # Create a BytesIO object to hold the GIF data\n gif_buffer = io.BytesIO()\n # Save the resized image as GIF to the BytesIO object\n frames[0].save(\n gif_buffer,\n format=\"GIF\",\n save_all=True,\n append_images=frames[1:],\n loop=1,\n duration=img.info[\"duration\"],\n disposal=2,\n )\n # Seek to the start of the GIF buffer\n gif_buffer.seek(0)\n # Return the GIF data\n return self.create_payloads(gif_buffer.getvalue())\n except IOError as e:\n self.logging.error(\"could not process gif: {}\".format(e))\n quit()"
},
{
"identifier": "Image",
"path": "core/idotmatrix/image.py",
"snippet": "class Image:\n logging = logging.getLogger(\"idotmatrix.\" + __name__)\n\n def show(self, mode=1):\n \"\"\"Enter the DIY draw mode of the iDotMatrix device.\n\n Args:\n mode (int): 0 = disable DIY, 1 = enable DIY, 2 = ?, 3 = ?. Defaults to 1.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 5,\n 0,\n 4,\n 1,\n int(mode) % 256,\n ]\n )\n except BaseException as error:\n self.logging.error(\"could not enter image mode :(\")\n quit()\n\n def load_png(self, file_path):\n \"\"\"Load a PNG file into a byte buffer.\n\n Args:\n file_path (str): path to file\n\n Returns:\n file: returns the file contents\n \"\"\"\n with open(file_path, \"rb\") as file:\n return file.read()\n\n def split_into_chunks(self, data, chunk_size):\n \"\"\"Split the data into chunks of specified size.\n\n Args:\n data (bytearray): data to split into chunks\n chunk_size (int): size of the chunks\n\n Returns:\n list: returns list with chunks of given data input\n \"\"\"\n return [data[i : i + chunk_size] for i in range(0, len(data), chunk_size)]\n\n def create_payloads(self, png_data):\n \"\"\"Creates payloads from a PNG file.\n\n Args:\n png_data (bytearray): data of the png file\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n # Split the PNG data into 4096-byte chunks\n png_chunks = self.split_into_chunks(png_data, 4096)\n # Calculate the arbitrary metadata number\n idk = len(png_data) + len(png_chunks)\n idk_bytes = struct.pack(\"h\", idk) # convert to 16bit signed int\n png_len_bytes = struct.pack(\"i\", len(png_data))\n # build data\n payloads = bytearray()\n for i, chunk in enumerate(png_chunks):\n payload = (\n idk_bytes\n + bytearray(\n [\n 0,\n 0,\n 2 if i > 0 else 0,\n ]\n )\n + png_len_bytes\n + chunk\n )\n payloads.extend(payload)\n return payloads\n\n def upload_unprocessed(self, file_path):\n \"\"\"uploads an image without further checks and resizes.\n\n Args:\n file_path (str): path to the image file\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n png_data = self.load_png(file_path)\n return self.create_payloads(png_data)\n\n def upload_processed(self, file_path, pixel_size=32):\n \"\"\"uploads a file processed and makes sure everything is correct.\n\n Args:\n file_path (str): path to the image file\n pixel_size (int, optional): amount of pixels (either 16 or 32 makes sense). Defaults to 32.\n\n Returns:\n bytearray: returns bytearray payload\n \"\"\"\n try:\n # Open the image file\n with PilImage.open(file_path) as img:\n # Resize the image\n if img.size != (pixel_size, pixel_size):\n img = img.resize(\n (pixel_size, pixel_size), PilImage.Resampling.LANCZOS\n )\n # Create a BytesIO object to hold the PNG data\n png_buffer = io.BytesIO()\n # Save the resized image as PNG to the BytesIO object\n img.save(png_buffer, format=\"PNG\")\n # Seek to the start of the PNG buffer\n png_buffer.seek(0)\n # Return the PNG data\n return self.create_payloads(png_buffer.getvalue())\n except IOError as e:\n self.logging.error(\"could not process image: {}\".format(e))\n quit()"
},
{
"identifier": "FullscreenColor",
"path": "core/idotmatrix/fullscreenColor.py",
"snippet": "class FullscreenColor:\n \"\"\"This class contains the management of the iDotMatrix fullscreen color mode.\n Based on the BleProtocolN.java file of the iDotMatrix Android App.\n \"\"\"\n\n def setColor(self, r=0, g=0, b=0):\n \"\"\"Sets the fullscreen color of the screen of the device\n\n Args:\n r (int, optional): color red. Defaults to 0.\n g (int, optional): color green. Defaults to 0.\n b (int, optional): color blue. Defaults to 0.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 7,\n 0,\n 2,\n 2,\n int(r) % 256,\n int(g) % 256,\n int(b) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the color: {}\".format(error))"
},
{
"identifier": "MusicSync",
"path": "core/idotmatrix/musicSync.py",
"snippet": "class MusicSync:\n def setMicType(self, type):\n \"\"\"Set the microphone type. Not referenced anywhere in the iDotMatrix Android App. So not used atm.\n\n Args:\n type (int): type of the Microphone. Unknown what values can be used.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 6,\n 0,\n 11,\n 128,\n int(type) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the microphone type: {}\".format(error))\n\n def sendImageRythm(self, value1):\n \"\"\"Set the image rythm. Not referenced anywhere in the iDotMatrix Android App. When used (tested with values up to 10)\n it displays a stick figure which dances if the value1 gets changed often enough to a different one.\n\n Args:\n value1 (int): type of the rythm? Unknown what values can be used.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n 6,\n 0,\n 0,\n 2,\n int(value1) % 256,\n 1,\n ]\n )\n except BaseException as error:\n logging.error(\"could not set the image rythm: {}\".format(error))\n\n def sendRhythm(self, mode, byteArray):\n \"\"\"Used to send synchronized Microphone sound data to the device and visualizing it. Is handled in MicrophoneActivity.java of the\n iDotMatrix Android App. Will not be implemented here because I have no plans to support the computer microphone. The device\n has an integrated microphone which is able to react to sound.\n\n Args:\n mode (int): mode of the rythm.\n byteArray (byteArray): actual microphone sound data for the visualization.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return byteArray\n except BaseException as error:\n logging.error(\"could not set the rythm: {}\".format(error))\n\n def stopRythm(self):\n \"\"\"Stops the Microhpone Rythm on the iDotMatrix device.\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n return bytearray([6, 0, 0, 2, 0, 0])"
},
{
"identifier": "Scoreboard",
"path": "core/idotmatrix/scoreboard.py",
"snippet": "class Scoreboard:\n \"\"\"This class contains the Scorboard management of the iDotMatrix device.\"\"\"\n\n def setScoreboard(self, count1, count2):\n \"\"\"Set the scoreboard of the device.\n\n Args:\n count1 (int): first counter, max: 999 (buffer overflow, if more! -> maybe RCE? :D)\n count2 (int): second counter, max: 999 (buffer overflow, if more! -> maybe RCE? :D)\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n bytearray_count1 = struct.pack(\"!h\", int(count1))\n bytearray_count2 = struct.pack(\"!h\", int(count2))\n return bytearray(\n [\n 8,\n 0,\n 10,\n 128,\n int(bytearray_count1[1]) % 256,\n int(bytearray_count1[0]) % 256,\n int(bytearray_count2[1]) % 256,\n int(bytearray_count2[0]) % 256,\n ]\n )\n except BaseException as error:\n logging.error(\"could not update the scoreboard: {}\".format(error))"
},
{
"identifier": "Graffiti",
"path": "core/idotmatrix/graffiti.py",
"snippet": "class Graffiti:\n \"\"\"This class contains the Graffiti controls for the iDotMatrix device.\"\"\"\n\n def setPixelColor(self, r, g, b, x, y):\n \"\"\"Set the scoreboard of the device.\n\n Args:\n r (int): color red value\n g (int): color green value\n b (int): color blue value\n x (int): pixel x position\n y (int): pixel y position\n\n Returns:\n _type_: byte array of the command which needs to be sent to the device\n \"\"\"\n try:\n return bytearray(\n [\n ###START COMMAND####\n 10,\n 0,\n 5,\n 1,\n 0,\n ###END COMMAND####\n r, ###COLOR R\n g, ###COLOR G\n b, ###COLOR B\n x, ###PIXEL X\n y, ###PIXEL Y\n ]\n )\n except BaseException as error:\n logging.error(\"could not update the Graffiti Board: {}\".format(error))"
}
] | from datetime import datetime
from PIL import Image
from .bluetooth import Bluetooth
from .idotmatrix.chronograph import Chronograph
from .idotmatrix.clock import Clock
from .idotmatrix.common import Common
from .idotmatrix.countdown import Countdown
from .idotmatrix.gif import Gif
from .idotmatrix.image import Image
from .idotmatrix.fullscreenColor import FullscreenColor
from .idotmatrix.musicSync import MusicSync
from .idotmatrix.scoreboard import Scoreboard
from .idotmatrix.graffiti import Graffiti
import logging
import os
import time | 8,148 | parser.add_argument(
"--fullscreen-color",
action="store",
help="sets a fullscreen color. Format: <R0-255>-<G0-255>-<B0-255> (example: 255-255-255)",
)
# pixel color
parser.add_argument(
"--pixel-color",
action="append",
help="sets a pixel to a specific color. Could be used multiple times. Format: <PIXEL-X>-<PIXEL-Y>-<R0-255>-<G0-255>-<B0-255> (example: 0-0-255-255-255)",
nargs="+",
)
# scoreboard
parser.add_argument(
"--scoreboard",
action="store",
help="shows the scoreboard with the given scores. Format: <0-999>-<0-999>",
)
# image upload
parser.add_argument(
"--image",
action="store",
help="enables or disables the image mode (true = enable, false = disable)",
)
parser.add_argument(
"--set-image",
action="store",
help="uploads a given image file (fastest is png, max. pixel depending on your display). Format: ./path/to/image.png",
)
parser.add_argument(
"--process-image",
action="store",
help="processes the image instead of sending it raw (useful when the size does not match or it is not a png). Format: <AMOUNT_PIXEL>",
)
# gif upload
parser.add_argument(
"--set-gif",
action="store",
help="uploads a given gif file (pixel depending on your display). Format: ./path/to/image.gif",
)
parser.add_argument(
"--process-gif",
action="store",
help="processes the gif instead of sending it raw (useful when the size does not match). Format: <AMOUNT_PIXEL>",
)
async def run(self, args):
self.logging.info("initializing command line")
address = None
if args.address:
self.logging.debug("using --address")
address = args.address
elif "IDOTMATRIX_ADDRESS" in os.environ:
self.logging.debug("using IDOTMATRIX_ADDRESS")
address = os.environ["IDOTMATRIX_ADDRESS"]
if address is None:
self.logging.error("no device address given")
quit()
else:
self.bluetooth = Bluetooth(address)
# arguments which can be run in parallel
if args.sync_time:
await self.sync_time(args.set_time)
if args.rotate180degrees:
await self.rotate180degrees(args.rotate180degrees)
if args.togglescreen:
await self.togglescreen()
if args.set_brightness:
await self.set_brightness(args.set_brightness)
if args.set_password:
await self.set_password(args.set_password)
# arguments which cannot run in parallel
if args.test:
await self.test()
elif args.chronograph:
await self.chronograph(args.chronograph)
elif args.clock:
await self.clock(args)
elif args.countdown:
await self.countdown(args)
elif args.fullscreen_color:
await self.fullscreenColor(args.fullscreen_color)
elif args.pixel_color:
await self.pixelColor(args.pixel_color)
elif args.scoreboard:
await self.scoreboard(args.scoreboard)
elif args.image:
await self.image(args)
elif args.set_gif:
await self.gif(args)
async def test(self):
"""Tests all available options for the device"""
self.logging.info("starting test of device")
## chronograph
await self.bluetooth.send(Chronograph().setChronograph(1))
time.sleep(5)
await self.bluetooth.send(Chronograph().setChronograph(0))
time.sleep(1)
## clock
await self.bluetooth.send(Clock().setTimeIndicator(True))
await self.bluetooth.send(Clock().setClockMode(0, True, True))
time.sleep(5)
## countdown
await self.bluetooth.send(Countdown().setCountdown(1, 0, 5))
await self.bluetooth.send(Countdown().setCountdown(0, 0, 5))
time.sleep(5)
## fullscreen color
await self.bluetooth.send(FullscreenColor().setColor(255, 0, 0))
time.sleep(5)
## scoreboard
await self.bluetooth.send(Scoreboard().setScoreboard(1, 0))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 1))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 2))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(2, 2))
## graffiti
# load graffiti board and color pixel 0,0 red
| # python imports
# idotmatrix imports
class CMD:
bluetooth = None
logging = logging.getLogger("idotmatrix." + __name__)
def add_arguments(self, parser):
# test
parser.add_argument(
"--test",
action="store_true",
help="run the test function from the command line class",
)
# time sync
parser.add_argument(
"--sync-time",
action="store_true",
help="sync time to device",
)
parser.add_argument(
"--set-time",
action="store",
help="optionally set time to sync to device (use with --sync-time)",
default=datetime.now().strftime("%d-%m-%Y-%H:%M:%S"),
)
# device screen rotation
parser.add_argument(
"--rotate180degrees",
action="store",
help="enable 180 degree device rotation (true = enable, false = disable)",
)
# screen toggle
parser.add_argument(
"--togglescreen",
action="store_true",
help="toggles the screen on or off",
)
# brightness
parser.add_argument(
"--set-brightness",
action="store",
help="sets the brightness of the screen in percent: range 5..100",
)
# password
parser.add_argument(
"--set-password",
action="store",
help="sets password",
)
# chronograph
parser.add_argument(
"--chronograph",
action="store",
help="sets the chronograph mode: 0 = reset, 1 = (re)start, 2 = pause, 3 = continue after pause",
)
# clock
parser.add_argument(
"--clock",
action="store",
help="sets the clock mode: 0 = default, 1 = christmas, 2 = racing, 3 = inverted full screen, 4 = animated hourglass, 5 = frame 1, 6 = frame 2, 7 = frame 3",
)
parser.add_argument(
"--clock-with-date",
action="store_true",
help="shows the current date in addition to the current time.",
)
parser.add_argument(
"--clock-24h",
action="store_true",
help="shows the current time in 24h format.",
)
parser.add_argument(
"--clock-color",
action="store",
help="sets the color of the clock. Format: <R0-255>-<G0-255>-<B0-255> (example: 255-255-255)",
default="255-255-255",
)
# countdown
parser.add_argument(
"--countdown",
action="store",
help="sets the countdown mode: 0 = disable, 1 = start, 2 = pause, 3 = restart",
)
parser.add_argument(
"--countdown-time",
action="store",
help="sets the countdown mode: <MINUTES>-<SECONDS> (example: 10-30)",
default="5-0",
)
# fullscreen color
parser.add_argument(
"--fullscreen-color",
action="store",
help="sets a fullscreen color. Format: <R0-255>-<G0-255>-<B0-255> (example: 255-255-255)",
)
# pixel color
parser.add_argument(
"--pixel-color",
action="append",
help="sets a pixel to a specific color. Could be used multiple times. Format: <PIXEL-X>-<PIXEL-Y>-<R0-255>-<G0-255>-<B0-255> (example: 0-0-255-255-255)",
nargs="+",
)
# scoreboard
parser.add_argument(
"--scoreboard",
action="store",
help="shows the scoreboard with the given scores. Format: <0-999>-<0-999>",
)
# image upload
parser.add_argument(
"--image",
action="store",
help="enables or disables the image mode (true = enable, false = disable)",
)
parser.add_argument(
"--set-image",
action="store",
help="uploads a given image file (fastest is png, max. pixel depending on your display). Format: ./path/to/image.png",
)
parser.add_argument(
"--process-image",
action="store",
help="processes the image instead of sending it raw (useful when the size does not match or it is not a png). Format: <AMOUNT_PIXEL>",
)
# gif upload
parser.add_argument(
"--set-gif",
action="store",
help="uploads a given gif file (pixel depending on your display). Format: ./path/to/image.gif",
)
parser.add_argument(
"--process-gif",
action="store",
help="processes the gif instead of sending it raw (useful when the size does not match). Format: <AMOUNT_PIXEL>",
)
async def run(self, args):
self.logging.info("initializing command line")
address = None
if args.address:
self.logging.debug("using --address")
address = args.address
elif "IDOTMATRIX_ADDRESS" in os.environ:
self.logging.debug("using IDOTMATRIX_ADDRESS")
address = os.environ["IDOTMATRIX_ADDRESS"]
if address is None:
self.logging.error("no device address given")
quit()
else:
self.bluetooth = Bluetooth(address)
# arguments which can be run in parallel
if args.sync_time:
await self.sync_time(args.set_time)
if args.rotate180degrees:
await self.rotate180degrees(args.rotate180degrees)
if args.togglescreen:
await self.togglescreen()
if args.set_brightness:
await self.set_brightness(args.set_brightness)
if args.set_password:
await self.set_password(args.set_password)
# arguments which cannot run in parallel
if args.test:
await self.test()
elif args.chronograph:
await self.chronograph(args.chronograph)
elif args.clock:
await self.clock(args)
elif args.countdown:
await self.countdown(args)
elif args.fullscreen_color:
await self.fullscreenColor(args.fullscreen_color)
elif args.pixel_color:
await self.pixelColor(args.pixel_color)
elif args.scoreboard:
await self.scoreboard(args.scoreboard)
elif args.image:
await self.image(args)
elif args.set_gif:
await self.gif(args)
async def test(self):
"""Tests all available options for the device"""
self.logging.info("starting test of device")
## chronograph
await self.bluetooth.send(Chronograph().setChronograph(1))
time.sleep(5)
await self.bluetooth.send(Chronograph().setChronograph(0))
time.sleep(1)
## clock
await self.bluetooth.send(Clock().setTimeIndicator(True))
await self.bluetooth.send(Clock().setClockMode(0, True, True))
time.sleep(5)
## countdown
await self.bluetooth.send(Countdown().setCountdown(1, 0, 5))
await self.bluetooth.send(Countdown().setCountdown(0, 0, 5))
time.sleep(5)
## fullscreen color
await self.bluetooth.send(FullscreenColor().setColor(255, 0, 0))
time.sleep(5)
## scoreboard
await self.bluetooth.send(Scoreboard().setScoreboard(1, 0))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 1))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 2))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(2, 2))
## graffiti
# load graffiti board and color pixel 0,0 red | await self.bluetooth.send(Graffiti().setPixelColor(255, 0, 0, 0, 0)) | 10 | 2023-11-13 14:04:21+00:00 | 12k |
wjun0830/CGDETR | cg_detr/inference.py | [
{
"identifier": "AverageMeter",
"path": "utils/basic_utils.py",
"snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current/max/min value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n\n def update(self, val, n=1):\n self.max = max(val, self.max)\n self.min = min(val, self.min)\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count"
},
{
"identifier": "TestOptions",
"path": "cg_detr/config.py",
"snippet": "class TestOptions(BaseOptions):\n \"\"\"add additional options for evaluating\"\"\"\n\n def initialize(self):\n BaseOptions.initialize(self)\n # also need to specify --eval_split_name\n self.parser.add_argument(\"--eval_id\", type=str, help=\"evaluation id\")\n self.parser.add_argument(\"--eval_results_dir\", type=str, default=None,\n help=\"dir to save results, if not set, fall back to training results_dir\")\n self.parser.add_argument(\"--model_dir\", type=str,\n help=\"dir contains the model file, will be converted to absolute path afterwards\")"
},
{
"identifier": "build_model",
"path": "cg_detr/model.py",
"snippet": "def build_model(args):\n device = torch.device(args.device)\n\n transformer = build_transformer(args)\n position_embedding, txt_position_embedding = build_position_encoding(args)\n\n if args.a_feat_dir is None:\n model = CGDETR(\n transformer,\n position_embedding,\n txt_position_embedding,\n txt_dim=args.t_feat_dim,\n vid_dim=args.v_feat_dim,\n num_queries=args.num_queries,\n input_dropout=args.input_dropout,\n aux_loss=args.aux_loss,\n contrastive_align_loss=args.contrastive_align_loss,\n contrastive_hdim=args.contrastive_hdim,\n span_loss_type=args.span_loss_type,\n use_txt_pos=args.use_txt_pos,\n n_input_proj=args.n_input_proj,\n args=args\n )\n else:\n model = CGDETR(\n transformer,\n position_embedding,\n txt_position_embedding,\n txt_dim=args.t_feat_dim,\n vid_dim=args.v_feat_dim,\n aud_dim=args.a_feat_dim,\n num_queries=args.num_queries,\n input_dropout=args.input_dropout,\n aux_loss=args.aux_loss,\n contrastive_align_loss=args.contrastive_align_loss,\n contrastive_hdim=args.contrastive_hdim,\n span_loss_type=args.span_loss_type,\n use_txt_pos=args.use_txt_pos,\n n_input_proj=args.n_input_proj,\n args=args\n )\n\n matcher = build_matcher(args)\n weight_dict = {\"loss_span\": args.span_loss_coef,\n \"loss_giou\": args.giou_loss_coef,\n \"loss_label\": args.label_loss_coef,\n \"loss_saliency\": args.lw_saliency,\n \"loss_ms_align\": args.lw_ms_align,\n \"loss_distill\": args.lw_distill,\n \"loss_orthogonal_dummy\":args.lw_distill}\n if args.contrastive_align_loss:\n weight_dict[\"loss_contrastive_align\"] = args.contrastive_align_loss_coef\n\n if args.aux_loss:\n aux_weight_dict = {}\n for i in range(args.dec_layers - 1):\n aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items() if k != \"loss_saliency\"})\n weight_dict.update(aux_weight_dict)\n\n losses = ['spans', 'labels', 'saliency', 'ms_align', 'distill', 'orthogonal_dummy']\n if args.contrastive_align_loss:\n losses += [\"contrastive_align\"]\n \n # For highlight detection datasets\n use_matcher = not (args.dset_name in ['youtube_uni', 'tvsum'])\n \n criterion = SetCriterion(\n matcher=matcher, weight_dict=weight_dict, losses=losses,\n eos_coef=args.eos_coef, temperature=args.temperature,\n span_loss_type=args.span_loss_type, max_v_l=args.max_v_l,\n saliency_margin=args.saliency_margin, use_matcher=use_matcher, args=args\n )\n criterion.to(device)\n return model, criterion"
},
{
"identifier": "span_cxw_to_xx",
"path": "cg_detr/span_utils.py",
"snippet": "def span_cxw_to_xx(cxw_spans):\n \"\"\"\n Args:\n cxw_spans: tensor, (#windows, 2) or (..., 2), the last dim is a row denoting a window of format (center, width)\n\n >>> spans = torch.Tensor([[0.5000, 1.0000], [0.3000, 0.2000]])\n >>> span_cxw_to_xx(spans)\n tensor([[0.0000, 1.0000],\n [0.2000, 0.4000]])\n >>> spans = torch.Tensor([[[0.5000, 1.0000], [0.3000, 0.2000]]])\n >>> span_cxw_to_xx(spans)\n tensor([[[0.0000, 1.0000],\n [0.2000, 0.4000]]])\n \"\"\"\n x1 = cxw_spans[..., 0] - 0.5 * cxw_spans[..., 1]\n x2 = cxw_spans[..., 0] + 0.5 * cxw_spans[..., 1]\n return torch.stack([x1, x2], dim=-1)"
},
{
"identifier": "StartEndDataset",
"path": "cg_detr/start_end_dataset.py",
"snippet": "class StartEndDataset(Dataset):\n Q_FEAT_TYPES = [\"pooler_output\", \"last_hidden_state\"]\n \"\"\"One line in data loaded from data_path.\"\n {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17],\n \"relevant_windows\": [[26, 36]]\n }\n \"\"\"\n\n def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir,\n q_feat_type=\"last_hidden_state\",\n max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode=\"video\",\n normalize_v=True, normalize_t=True, load_labels=True,\n clip_len=2, max_windows=5, span_loss_type=\"l1\", txt_drop_ratio=0,\n dset_domain=None):\n self.dset_name = dset_name\n self.data_path = data_path\n self.data_ratio = data_ratio\n self.v_feat_dirs = v_feat_dirs \\\n if isinstance(v_feat_dirs, list) else [v_feat_dirs]\n self.q_feat_dir = q_feat_dir\n self.q_feat_type = q_feat_type\n if max_v_l == -1:\n max_v_l = 100000000\n if max_q_l == -1:\n max_q_l = 100\n self.max_q_l = max_q_l\n self.max_v_l = max_v_l\n self.ctx_mode = ctx_mode\n self.use_tef = \"tef\" in ctx_mode\n self.use_video = \"video\" in ctx_mode\n self.normalize_t = normalize_t\n self.normalize_v = normalize_v\n self.load_labels = load_labels\n self.clip_len = clip_len\n self.max_windows = max_windows # maximum number of windows to use as labels\n self.span_loss_type = span_loss_type\n self.txt_drop_ratio = txt_drop_ratio\n if \"val\" in data_path or \"test\" in data_path:\n assert txt_drop_ratio == 0\n\n\n # checks\n assert q_feat_type in self.Q_FEAT_TYPES\n\n # data\n self.data = self.load_data()\n \n # load specific domain data for tvsum dataset\n if self.dset_name in ['tvsum', 'tvsum_sfc']:\n target_domain = dset_domain\n assert target_domain in [\"BK\", \"BT\", \"DS\", \"FM\", \"GA\", \"MS\", \"PK\", \"PR\", \"VT\", \"VU\"]\n\n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data\n \n # load specific domain data for youtube-hl dataset\n if self.dset_name == 'youtube_uni':\n target_domain = dset_domain\n assert target_domain in [\"dog\", \"gymnastics\", \"parkour\", \"skating\", \"skiing\", \"surfing\"]\n \n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data \n \n self.use_glove = False\n self.use_glove = 'vgg' in self.v_feat_dirs[0]\n\n if self.dset_name == 'charadesSTA' and self.use_glove:\n self.vocab = vocab.pretrained_aliases['glove.6B.300d']()\n self.vocab.itos.extend(['<unk>'])\n self.vocab.stoi['<unk>'] = self.vocab.vectors.shape[0]\n self.vocab.vectors = torch.cat(\n (self.vocab.vectors, torch.zeros(1, self.vocab.dim)), dim=0)\n self.embedding = nn.Embedding.from_pretrained(self.vocab.vectors)\n \n\n def load_data(self):\n datalist = load_jsonl(self.data_path)\n if self.data_ratio != 1:\n n_examples = int(len(datalist) * self.data_ratio)\n datalist = datalist[:n_examples]\n logger.info(\"Using {}% of the data: {} examples\"\n .format(self.data_ratio * 100, n_examples))\n return datalist\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n meta = self.data[index]\n\n model_inputs = dict()\n\n if self.use_glove:\n model_inputs[\"query_feat\"] = self.get_query(meta[\"query\"])\n else:\n model_inputs[\"query_feat\"] = self._get_query_feat_by_qid(meta[\"qid\"]) # (Dq, ) or (Lq, Dq)\n \n if self.use_video:\n model_inputs[\"video_feat\"] = self._get_video_feat_by_vid(meta[\"vid\"]) # (Lv, Dv)\n ctx_l = 
len(model_inputs[\"video_feat\"])\n else:\n ctx_l = self.max_v_l\n\n\n if self.use_tef:\n tef_st = torch.arange(0, ctx_l, 1.0) / ctx_l\n tef_ed = tef_st + 1.0 / ctx_l\n tef = torch.stack([tef_st, tef_ed], dim=1) # (Lv, 2)\n if self.use_video:\n model_inputs[\"video_feat\"] = torch.cat(\n [model_inputs[\"video_feat\"], tef], dim=1) # (Lv, Dv+2)\n else:\n model_inputs[\"video_feat\"] = tef\n\n\n if self.dset_name in ['tvsum']:\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_tvsum(meta_label, ctx_l)\n if len(model_inputs[\"saliency_all_labels\"]) != len(model_inputs[\"video_feat\"]):\n model_inputs[\"video_feat\"] = model_inputs[\"video_feat\"][:len(model_inputs[\"saliency_all_labels\"])]\n\n elif self.dset_name == 'youtube_uni':\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_youtube(meta_label, ctx_l)\n else:\n if \"relevant_windows\" in meta: ## For Qvhighlights test set\n model_inputs[\"span_labels\"] = self.get_span_labels(meta[\"relevant_windows\"], ctx_l) # (#windows, 2)\n if self.dset_name in ['charadesSTA', 'tacos', 'activitynet']: ## charades, tacos, nlq\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n elif self.dset_name in ['nlq']:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l, 2) # only one gt\n elif \"subs_train\" not in self.data_path:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all(meta[\"relevant_clip_ids\"], meta[\"saliency_scores\"], ctx_l)\n else:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\n \"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n\n if 'qvhighlight' in self.data_path:\n model_inputs[\"relevant_clip_ids\"] = meta[\"relevant_clip_ids\"]\n model_inputs[\"vid\"] = meta[\"vid\"]\n model_inputs[\"qid\"] = meta[\"qid\"]\n return dict(meta=meta, model_inputs=model_inputs)\n\n def get_query(self, query):\n word_inds = torch.LongTensor(\n [self.vocab.stoi.get(w.lower(), 400000) for w in query.split()])\n return self.embedding(word_inds)\n\n def get_saliency_labels_sub_as_query(self, gt_window, duration, ctx_l, max_n=2):\n clip_len = duration / ctx_l\n gt_st = int(gt_window[0] / clip_len)\n gt_ed = max(0, min(int(gt_window[1] / clip_len), ctx_l) - 1)\n if gt_st > gt_ed:\n gt_st = gt_ed\n\n if gt_st != gt_ed:\n pos_clip_indices = random.sample(range(gt_st, gt_ed + 1), k=max_n)\n else:\n if self.dset_name == 'nlq':\n pos_clip_indices = [gt_st] * 2\n else:\n pos_clip_indices = [gt_st, gt_st]\n\n neg_pool = list(range(0, gt_st)) + list(range(gt_ed+1, ctx_l))\n try:\n neg_clip_indices = random.sample(neg_pool, k=max_n)\n except:\n neg_clip_indices = pos_clip_indices\n\n # For charades_sta\n 
score_array = np.zeros(ctx_l)\n score_array[gt_st:gt_ed + 1] = 1\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n\n def get_saliency_labels(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices\n\n def get_saliency_labels_all(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # score_array = [min(agg_scores[idx], ctx_l-1) for idx in range(ctx_l)]\n score_array = np.zeros(ctx_l)\n for idx in range(len(rel_clip_ids)):\n if rel_clip_ids[idx] >= ctx_l:\n score_array_new = np.zeros(ctx_l + 1)\n score_array_new[:ctx_l] = score_array\n score_array = score_array_new\n score_array[rel_clip_ids[idx]] = agg_scores[idx]\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = 
random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_tvsum(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n agg_scores = np.sum(labels - np.ones_like(labels), axis=-1)[:ctx_l] # start from 1, so minus 1\n score_array = agg_scores / 80 * 12\n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_youtube(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n # Youtube-hl only have binary score\n agg_scores = np.array(labels)[:, 0] # (L, 1) --> (L, )\n score_array = agg_scores * 1\n \n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n \n def get_span_labels(self, windows, ctx_l):\n \"\"\"\n windows: list([st, ed]) in seconds. E.g. 
[[26, 36]], corresponding st_ed clip_indices [[13, 17]] (inclusive)\n Note a maximum of `self.max_windows` windows are used.\n returns Tensor of shape (#windows, 2), each row is [center, width] normalized by video length\n \"\"\"\n if len(windows) > self.max_windows:\n random.shuffle(windows)\n windows = windows[:self.max_windows]\n if self.span_loss_type == \"l1\":\n windows = torch.Tensor(windows) / (ctx_l * self.clip_len) # normalized windows in xx\n windows = span_xx_to_cxw(windows) # normalized windows in cxw\n elif self.span_loss_type == \"ce\":\n windows = torch.Tensor([\n [int(w[0] / self.clip_len), min(int(w[1] / self.clip_len), ctx_l) - 1]\n for w in windows]).long() # inclusive\n else:\n raise NotImplementedError\n return windows\n\n def _get_query_feat_by_qid(self, qid):\n if self.dset_name == 'tvsum':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid))) # 'token', 'text'\n return torch.from_numpy(q_feat['token'])\n # youtube-hl\n elif self.dset_name == 'youtube_uni':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid)))\n return torch.from_numpy(q_feat['last_hidden_state'])\n \n elif self.dset_name in ['tacos', 'nlq']:\n q_feat_path = join(self.q_feat_dir, f\"{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n else:\n # QVhighlight dataset\n q_feat_path = join(self.q_feat_dir, f\"qid{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n return torch.from_numpy(q_feat) # (D, ) or (Lq, D)\n\n def random_drop_rows(self, embeddings):\n \"\"\"randomly mask num_drop rows in embeddings to be zero.\n Args:\n embeddings: np.ndarray (L, D)\n \"\"\"\n num_drop_rows = round(len(embeddings) * self.txt_drop_ratio)\n if num_drop_rows > 0:\n row_indices = np.random.choice(\n len(embeddings), size=num_drop_rows, replace=False)\n embeddings[row_indices] = 0\n return embeddings\n\n def _get_video_feat_by_vid(self, vid):\n if self.dset_name == 'tvsum':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n _feat_path = join(_feat_dir, f\"{vid}_rgb.npy\")\n _feat_rgb = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n\n _feat_path = join(_feat_dir, f\"{vid}_opt.npy\")\n _feat_opt = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n _feat = np.concatenate([_feat_rgb, _feat_opt], axis=-1)\n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n elif self.dset_name == 'youtube_uni':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n # Only single npz files per directory\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n 
v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list] # TODO do we need to cut the length over the min_len?\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n else:\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n return torch.from_numpy(v_feat) # (Lv, D)"
},
{
"identifier": "start_end_collate",
"path": "cg_detr/start_end_dataset.py",
"snippet": "def start_end_collate(batch):\n batch_meta = [e[\"meta\"] for e in batch] # seems no need to collate ?\n\n model_inputs_keys = batch[0][\"model_inputs\"].keys()\n batched_data = dict()\n for k in model_inputs_keys:\n if k == \"span_labels\":\n batched_data[k] = [dict(spans=e[\"model_inputs\"][\"span_labels\"]) for e in batch]\n continue\n if k in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n batched_data[k] = torch.LongTensor([e[\"model_inputs\"][k] for e in batch])\n continue\n if k == \"saliency_all_labels\":\n pad_data, mask_data = pad_sequences_1d([e[\"model_inputs\"][k] for e in batch], dtype=np.float32, fixed_length=None)\n batched_data[k] = torch.tensor(pad_data, dtype=torch.float32)\n continue\n if k == 'qid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n if k == 'vid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n batched_data[k] = pad_sequences_1d(\n [e[\"model_inputs\"][k] for e in batch], dtype=torch.float32, fixed_length=None)\n return batch_meta, batched_data"
},
{
"identifier": "prepare_batch_inputs",
"path": "cg_detr/start_end_dataset.py",
"snippet": "def prepare_batch_inputs(batched_model_inputs, device, non_blocking=False):\n model_inputs = dict(\n src_txt=batched_model_inputs[\"query_feat\"][0].to(device, non_blocking=non_blocking),\n src_txt_mask=batched_model_inputs[\"query_feat\"][1].to(device, non_blocking=non_blocking),\n src_vid=batched_model_inputs[\"video_feat\"][0].to(device, non_blocking=non_blocking),\n src_vid_mask=batched_model_inputs[\"video_feat\"][1].to(device, non_blocking=non_blocking),\n vid=batched_model_inputs[\"vid\"],\n qid=batched_model_inputs[\"qid\"],\n )\n targets = {}\n\n if \"span_labels\" in batched_model_inputs:\n targets[\"span_labels\"] = [\n dict(spans=e[\"spans\"].to(device, non_blocking=non_blocking))\n for e in batched_model_inputs[\"span_labels\"]\n ]\n if \"saliency_pos_labels\" in batched_model_inputs:\n for name in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n targets[name] = batched_model_inputs[name].to(device, non_blocking=non_blocking)\n\n if \"saliency_all_labels\" in batched_model_inputs:\n targets[\"saliency_all_labels\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets[\"relevant_clips\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets = None if len(targets) == 0 else targets\n return model_inputs, targets"
},
{
"identifier": "PostProcessorDETR",
"path": "cg_detr/postprocessing_cg_detr.py",
"snippet": "class PostProcessorDETR:\n def __init__(self, clip_length=2, min_ts_val=0, max_ts_val=150,\n min_w_l=2, max_w_l=70, move_window_method=\"center\",\n process_func_names=(\"clip_window_l\", \"clip_ts\", \"round_multiple\")):\n self.clip_length = clip_length\n self.min_ts_val = min_ts_val\n self.max_ts_val = max_ts_val\n self.min_w_l = min_w_l\n self.max_w_l = max_w_l\n self.move_window_method = move_window_method\n self.process_func_names = process_func_names\n self.name2func = dict(\n clip_ts=self.clip_min_max_timestamps,\n round_multiple=self.round_to_multiple_clip_lengths,\n clip_window_l=self.clip_window_lengths\n )\n\n def __call__(self, lines):\n processed_lines = []\n for line in tqdm(lines, desc=f\"convert to multiples of clip_length={self.clip_length}\"):\n windows_and_scores = torch.tensor(line[\"pred_relevant_windows\"])\n windows = windows_and_scores[:, :2]\n for func_name in self.process_func_names:\n windows = self.name2func[func_name](windows)\n line[\"pred_relevant_windows\"] = torch.cat(\n [windows, windows_and_scores[:, 2:3]], dim=1).tolist()\n line[\"pred_relevant_windows\"] = [e[:2] + [float(f\"{e[2]:.4f}\")] for e in line[\"pred_relevant_windows\"]]\n processed_lines.append(line)\n return processed_lines\n\n def clip_min_max_timestamps(self, windows):\n \"\"\"\n windows: (#windows, 2) torch.Tensor\n ensure timestamps for all windows is within [min_val, max_val], clip is out of boundaries.\n \"\"\"\n return torch.clamp(windows, min=self.min_ts_val, max=self.max_ts_val)\n\n def round_to_multiple_clip_lengths(self, windows):\n \"\"\"\n windows: (#windows, 2) torch.Tensor\n ensure the final window timestamps are multiples of `clip_length`\n \"\"\"\n return torch.round(windows / self.clip_length) * self.clip_length\n\n def clip_window_lengths(self, windows):\n \"\"\"\n windows: (#windows, 2) np.ndarray\n ensure the final window duration are within [self.min_w_l, self.max_w_l]\n \"\"\"\n window_lengths = windows[:, 1] - windows[:, 0]\n small_rows = window_lengths < self.min_w_l\n if torch.sum(small_rows) > 0:\n windows = self.move_windows(\n windows, small_rows, self.min_w_l, move_method=self.move_window_method)\n large_rows = window_lengths > self.max_w_l\n if torch.sum(large_rows) > 0:\n windows = self.move_windows(\n windows, large_rows, self.max_w_l, move_method=self.move_window_method)\n return windows\n\n @classmethod\n def move_windows(cls, windows, row_selector, new_length, move_method=\"left\"):\n \"\"\"\n Args:\n windows:\n row_selector:\n new_length:\n move_method: str,\n left: keep left unchanged\n center: keep center unchanged\n right: keep right unchanged\n\n Returns:\n\n \"\"\"\n # import ipdb;\n # ipdb.set_trace()\n if move_method == \"left\":\n windows[row_selector, 1] = windows[row_selector, 0] + new_length\n elif move_method == \"right\":\n windows[row_selector, 0] = windows[row_selector, 1] - new_length\n elif move_method == \"center\":\n center = (windows[row_selector, 1] + windows[row_selector, 0]) / 2.\n windows[row_selector, 0] = center - new_length / 2.\n windows[row_selector, 1] = center + new_length / 2.\n return windows"
},
{
"identifier": "eval_submission",
"path": "standalone_eval/eval.py",
"snippet": "def eval_submission(submission, ground_truth, verbose=True, match_number=True):\n \"\"\"\n Args:\n submission: list(dict), each dict is {\n qid: str,\n query: str,\n vid: str,\n pred_relevant_windows: list([st, ed]),\n pred_saliency_scores: list(float), len == #clips in video.\n i.e., each clip in the video will have a saliency score.\n }\n ground_truth: list(dict), each dict is {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17]\n \"saliency_scores\": [[4, 4, 2], [3, 4, 2], [2, 2, 3], [2, 2, 2], [0, 1, 3]]\n each sublist corresponds to one clip in relevant_clip_ids.\n The 3 elements in the sublist are scores from 3 different workers. The\n scores are in [0, 1, 2, 3, 4], meaning [Very Bad, ..., Good, Very Good]\n }\n verbose:\n match_number:\n\n Returns:\n\n \"\"\"\n pred_qids = set([e[\"qid\"] for e in submission])\n gt_qids = set([e[\"qid\"] for e in ground_truth])\n if match_number:\n assert pred_qids == gt_qids, \\\n f\"qids in ground_truth and submission must match. \" \\\n f\"use `match_number=False` if you wish to disable this check\"\n else: # only leave the items that exists in both submission and ground_truth\n shared_qids = pred_qids.intersection(gt_qids)\n submission = [e for e in submission if e[\"qid\"] in shared_qids]\n ground_truth = [e for e in ground_truth if e[\"qid\"] in shared_qids]\n\n eval_metrics = {}\n eval_metrics_brief = OrderedDict()\n if \"pred_relevant_windows\" in submission[0]:\n moment_ret_scores = eval_moment_retrieval(\n submission, ground_truth, verbose=verbose)\n eval_metrics.update(moment_ret_scores)\n moment_ret_scores_brief = {\n \"MR-full-mAP\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"average\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"0.5\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"0.75\"],\n \"MR-short-mAP\": moment_ret_scores[\"short\"][\"MR-mAP\"][\"average\"],\n \"MR-middle-mAP\": moment_ret_scores[\"middle\"][\"MR-mAP\"][\"average\"],\n \"MR-long-mAP\": moment_ret_scores[\"long\"][\"MR-mAP\"][\"average\"],\n \"MR-full-mIoU\": moment_ret_scores[\"full\"][\"MR-mIoU\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.3\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.5\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.7\"],\n }\n eval_metrics_brief.update(\n sorted([(k, v) for k, v in moment_ret_scores_brief.items()], key=lambda x: x[0]))\n\n if \"pred_saliency_scores\" in submission[0]:\n highlight_det_scores = eval_highlight(\n submission, ground_truth, verbose=verbose)\n eval_metrics.update(highlight_det_scores)\n highlight_det_scores_brief = dict([\n (f\"{k}-{sub_k.split('-')[1]}\", v[sub_k])\n for k, v in highlight_det_scores.items() for sub_k in v])\n eval_metrics_brief.update(highlight_det_scores_brief)\n\n # sort by keys\n final_eval_metrics = OrderedDict()\n final_eval_metrics[\"brief\"] = eval_metrics_brief\n final_eval_metrics.update(sorted([(k, v) for k, v in eval_metrics.items()], key=lambda x: x[0]))\n return final_eval_metrics"
},
{
"identifier": "save_jsonl",
"path": "utils/basic_utils.py",
"snippet": "def save_jsonl(data, filename):\n \"\"\"data is a list\"\"\"\n with open(filename, \"w\") as f:\n f.write(\"\\n\".join([json.dumps(e) for e in data]))"
},
{
"identifier": "save_json",
"path": "utils/basic_utils.py",
"snippet": "def save_json(data, filename, save_pretty=False, sort_keys=False):\n with open(filename, \"w\") as f:\n if save_pretty:\n f.write(json.dumps(data, indent=4, sort_keys=sort_keys))\n else:\n json.dump(data, f)"
},
{
"identifier": "temporal_nms",
"path": "utils/temporal_nms.py",
"snippet": "def temporal_nms(predictions, nms_thd, max_after_nms=100):\n \"\"\"\n Args:\n predictions: list(sublist), each sublist is [st (float), ed(float), score (float)],\n note larger scores are better and are preserved. For metrics that are better when smaller,\n please convert to its negative, e.g., convert distance to negative distance.\n nms_thd: float in [0, 1]\n max_after_nms:\n Returns:\n predictions_after_nms: list(sublist), each sublist is [st (float), ed(float), score (float)]\n References:\n https://github.com/wzmsltw/BSN-boundary-sensitive-network/blob/7b101fc5978802aa3c95ba5779eb54151c6173c6/Post_processing.py#L42\n \"\"\"\n if len(predictions) == 1: # only has one prediction, no need for nms\n return predictions\n\n predictions = sorted(predictions, key=lambda x: x[2], reverse=True) # descending order\n\n tstart = [e[0] for e in predictions]\n tend = [e[1] for e in predictions]\n tscore = [e[2] for e in predictions]\n rstart = []\n rend = []\n rscore = []\n while len(tstart) > 1 and len(rscore) < max_after_nms: # max 100 after nms\n idx = 1\n while idx < len(tstart): # compare with every prediction in the list.\n if compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]) > nms_thd:\n # rm highly overlapped lower score entries.\n tstart.pop(idx)\n tend.pop(idx)\n tscore.pop(idx)\n # print(\"--------------------------------\")\n # print(compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]))\n # print([tstart[0], tend[0]], [tstart[idx], tend[idx]])\n # print(tstart.pop(idx), tend.pop(idx), tscore.pop(idx))\n else:\n # move to next\n idx += 1\n rstart.append(tstart.pop(0))\n rend.append(tend.pop(0))\n rscore.append(tscore.pop(0))\n\n if len(rscore) < max_after_nms and len(tstart) >= 1: # add the last, possibly empty.\n rstart.append(tstart.pop(0))\n rend.append(tend.pop(0))\n rscore.append(tscore.pop(0))\n\n predictions_after_nms = [[st, ed, s] for s, st, ed in zip(rscore, rstart, rend)]\n return predictions_after_nms"
}
] | import pprint
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import logging
from tqdm import tqdm, trange
from collections import OrderedDict, defaultdict
from utils.basic_utils import AverageMeter
from torch.utils.data import DataLoader
from cg_detr.config import TestOptions
from cg_detr.model import build_model
from cg_detr.span_utils import span_cxw_to_xx
from cg_detr.start_end_dataset import StartEndDataset, start_end_collate, prepare_batch_inputs
from cg_detr.postprocessing_cg_detr import PostProcessorDETR
from standalone_eval.eval import eval_submission
from utils.basic_utils import save_jsonl, save_json
from utils.temporal_nms import temporal_nms
from collections import OrderedDict
from sys import argv | 10,719 |
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO)
def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms):
mr_res_after_nms = []
for e in mr_res:
|
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO)
def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms):
mr_res_after_nms = []
for e in mr_res: | e["pred_relevant_windows"] = temporal_nms( | 11 | 2023-11-10 12:45:25+00:00 | 12k |
AdmTal/music-graphs | src/generate_music_graph.py | [
{
"identifier": "AnimationFrames",
"path": "src/animation_stuff.py",
"snippet": "class AnimationFrames:\n \"\"\"\n Helper object to organize layered frames in order to produce an animation.\n self._data is a Dict. The Keys are \"layer_ids\", and the values are Lists of \"images\".\n They are not really images, they are instead closures that can be executed to create an image.\n \"\"\"\n\n def __init__(self):\n self._data = {}\n\n def __len__(self):\n if not len(self._data):\n return 0\n first_layer = list(self._data.keys())[0]\n return len(self._data[first_layer])\n\n def items(self):\n return self._data.items()\n\n def _ensure_layer_length(self, length):\n for layer_id in self._data:\n self._data[layer_id].extend([None] * (length - len(self._data[layer_id])))\n\n def add_frames_to_layer(self, layer_id, frame_index, images):\n # Find the new length, which is the longer of the current max length or the new frame_index + number of images\n new_length = max(\n frame_index + len(images),\n max((len(frames) for frames in self._data.values()), default=0),\n )\n\n # Extend all layers to the new length\n self._ensure_layer_length(new_length)\n\n # Add new layer if it doesn't exist\n if layer_id not in self._data:\n self._data[layer_id] = [None] * new_length\n\n # Set images at the correct frame index\n for i, image in enumerate(images):\n self._data[layer_id][frame_index + i] = image\n\n def __str__(self):\n return str(self._data)"
},
{
"identifier": "cleanup_cache_dir",
"path": "src/cache_stuff.py",
"snippet": "def cleanup_cache_dir(cache_dir):\n shutil.rmtree(cache_dir)"
},
{
"identifier": "get_cache_dir",
"path": "src/cache_stuff.py",
"snippet": "def get_cache_dir():\n global _cache_dir_created, _cache_dir\n if not _cache_dir_created:\n _cache_dir = f\".cache/{uuid4()}\"\n os.makedirs(_cache_dir, exist_ok=True)\n _cache_dir_created = True\n return _cache_dir"
},
{
"identifier": "animate_bezier_point",
"path": "src/graph_stuff.py",
"snippet": "def animate_bezier_point(\n base_image,\n offsets,\n theme,\n track,\n points,\n frame_number,\n animation_length_in_frames,\n):\n overlay_image = Image.new(\n \"RGBA\",\n base_image.size,\n color=None,\n )\n draw = ImageDraw.Draw(overlay_image)\n\n x_offset, y_offset = offsets\n\n t = frame_number / animation_length_in_frames\n point = bezier_point(t, [points[i : i + 2] for i in range(0, len(points), 2)])\n point_center = (x_offset + point[0], y_offset + point[1])\n\n # Draw the 3D-looking circle\n for i in range(theme.ball_radius(track) // 2):\n # Calculate the color gradient based on the specified ball color\n draw.ellipse(\n [\n (point_center[0] - i, point_center[1] - i),\n (point_center[0] + i, point_center[1] + i),\n ],\n fill=theme.ball_color(track),\n outline=hex_to_rgb(theme.ball_stroke_color(track)),\n width=theme.ball_stroke_width(track),\n )\n\n blur_max = theme.ball_g_blur_max(track)\n if blur_max:\n blur_radius = min(\n animation_length_in_frames - frame_number,\n theme.ball_g_blur_max(track) / (frame_number + 1),\n )\n overlay_image = overlay_image.filter(\n ImageFilter.GaussianBlur(radius=blur_radius)\n )\n\n # Composite the transparent overlay onto the base image\n return Image.alpha_composite(\n base_image.convert(\"RGBA\"),\n overlay_image,\n )"
},
{
"identifier": "animate_ellipsis_blur",
"path": "src/graph_stuff.py",
"snippet": "def animate_ellipsis_blur(\n base_image,\n points,\n frame_number,\n offsets,\n theme,\n track,\n animation_len,\n velocity,\n):\n image = base_image.copy()\n draw = ImageDraw.Draw(image)\n\n x_offset, y_offset = offsets\n x0, y0, w, h = points\n x0 += x_offset\n y0 += y_offset\n\n # Calculate the increase in size\n w_increase = w * theme.note_increase_size(track) * (velocity / 127)\n h_increase = h * theme.note_increase_size(track) * (velocity / 127)\n\n # Define the bounding box with the increased size\n bounding_box = [\n x0 - w - w_increase / 2,\n y0 - h - h_increase / 2,\n x0 + w + w_increase / 2,\n y0 + h + h_increase / 2,\n ]\n\n # Draw the initial ellipse\n draw.ellipse(\n bounding_box,\n outline=hex_to_rgb(theme.note_color(track)),\n width=theme.note_stroke_width(track),\n )\n\n # Determine the blur radius for this frame\n blur_strength = (frame_number / animation_len) * velocity\n blur_radius = max(1, blur_strength)\n\n # Create a mask for the ellipse to constrain the blur effect\n mask = Image.new(\"L\", image.size, 0)\n mask_draw = ImageDraw.Draw(mask)\n mask_draw.ellipse(bounding_box, fill=255)\n\n # Apply the blur effect on the mask\n mask_blurred = mask.filter(ImageFilter.GaussianBlur(blur_radius))\n\n # Create a solid image for the blur color\n ellipse = Image.new(\"RGBA\", image.size, hex_to_rgb(theme.note_color(track)))\n\n # Composite the blurred mask with the ellipse onto the base image\n image.paste(ellipse, mask=mask_blurred)\n\n return image"
},
{
"identifier": "draw_fading_bezier_curve",
"path": "src/graph_stuff.py",
"snippet": "def draw_fading_bezier_curve(\n base_image,\n offsets,\n theme,\n points,\n frame_number,\n track,\n animation_len,\n):\n # Create a new transparent image to draw the Bézier curve\n overlay_image = Image.new(\"RGBA\", base_image.size, (255, 255, 255, 0))\n draw = ImageDraw.Draw(overlay_image)\n\n # Calculate alpha value for current frame\n alpha = calculate_alpha(frame_number, animation_len)\n\n # Split the points into pairs and apply offsets\n points = [points[i : i + 2] for i in range(0, len(points), 2)]\n points = [(c[0] + offsets[0], c[1] + offsets[1]) for c in points]\n\n # Split the curve into segments\n segments = 300 * len(points)\n curve = [bezier_point(t / segments, points) for t in range(segments + 1)]\n\n # Draw the border/shadow\n border_width = theme.chord_line_width(track) * 2\n border_rgba_color = hex_to_rgba(theme.chord_line_border_color(track), alpha)\n for i in range(segments):\n draw.line(\n (\n curve[i],\n curve[i + 1],\n ),\n fill=border_rgba_color,\n width=border_width,\n )\n overlay_image = overlay_image.filter(ImageFilter.GaussianBlur(radius=5))\n draw = ImageDraw.Draw(overlay_image)\n\n # Convert hex color to RGBA with alpha for main line\n rgba_color = hex_to_rgba(theme.chord_line_color(track), alpha)\n\n # Draw the main line with fading effect\n for i in range(segments):\n draw.line(\n (\n curve[i],\n curve[i + 1],\n ),\n fill=rgba_color,\n width=theme.chord_line_width(track),\n )\n\n # Composite the transparent overlay onto the base image\n return Image.alpha_composite(\n base_image.convert(\"RGBA\"),\n overlay_image,\n )"
},
{
"identifier": "parse_graph",
"path": "src/graph_stuff.py",
"snippet": "def parse_graph(\n graph,\n theme: Theme,\n):\n temp_filename = f\"{get_cache_dir()}/graph.gv\"\n graph.filename = temp_filename\n graph.render(view=False)\n file_contents = open(f\"{temp_filename}.xdot\").read()\n\n lines = compact_dot_format(file_contents).split(\"\\n\")\n\n nodes = {}\n edges = defaultdict(dict)\n\n nodes_to_draw = []\n text_to_draw = []\n\n for line in lines[1:-1]:\n line_id, attributes = line.split(\"[\")\n line_id = line_id.strip().replace('\"', \"\")\n attributes = attributes.replace(\"];\", \"\")\n\n attrs_dict = split_attributes(attributes)\n\n if line_id == \"graph\":\n draw = parse_draw(attrs_dict[\"_draw_\"], theme.dpi)\n host_image = Image.new(\n \"RGBA\",\n (\n theme.width,\n theme.height,\n ),\n color=None,\n )\n\n # offsets\n host_width, host_height = host_image.size\n width, height = get_dimensions(draw.p_points)\n x = (host_width - width) // 2\n y = (host_height - height) // 2\n offsets = (x, y)\n\n graph_image = Image.new(\n \"RGBA\",\n (\n theme.width,\n theme.height,\n ),\n color=(0, 0, 0, 0),\n )\n\n if theme.background_image:\n bg_image = Image.open(theme.background_image).convert(\"RGBA\")\n bg_image = bg_image.resize(\n (theme.width, theme.height),\n Image.Resampling.LANCZOS,\n )\n host_image.paste(bg_image, (0, 0))\n else:\n # Create a new white image if background_image is false\n bg_image = Image.new(\n \"RGBA\",\n (theme.width, theme.height),\n hex_to_rgb(theme.background_color),\n )\n host_image.paste(bg_image, (0, 0))\n\n if \"_draw_\" in attrs_dict:\n draw = parse_draw(attrs_dict[\"_draw_\"], theme.dpi)\n if draw.e_points:\n nodes_to_draw.append(\n [\n draw.e_points,\n theme.node_outline_color,\n theme.node_fill_color,\n theme.node_shadow_color,\n theme.node_shadow_size,\n theme.graph_line_width,\n ]\n )\n nodes[line_id] = draw\n\n if draw.b_points:\n a, b = line_id.split(\" -- \")\n edges[a][b] = draw\n edges[b][a] = draw\n if theme.show_lines:\n draw_bezier_curve(\n offsets,\n graph_image,\n draw.b_points,\n theme.graph_line_color,\n theme.graph_line_width,\n theme.graph_line_blur,\n )\n\n if \"_ldraw_\" in attrs_dict and not theme.hide_letters:\n ldraw = parse_ldraw(attrs_dict[\"_ldraw_\"], theme.dpi)\n\n if len(ldraw.text) == 2:\n dx = theme.text_location_offsets.len_2.x\n dy = theme.text_location_offsets.len_2.y\n else:\n dx = theme.text_location_offsets.len_1.x\n dy = theme.text_location_offsets.len_1.y\n\n text_to_draw.append(\n [\n ldraw.text,\n ldraw.text_x + dx,\n ldraw.text_y + dy,\n theme.font,\n theme.font_size,\n theme.node_text_color,\n theme.node_text_outline_color,\n theme.node_text_stroke_width,\n ]\n )\n\n for args in nodes_to_draw:\n draw_ellipse(offsets, graph_image, *args)\n\n for args in text_to_draw:\n draw_centered_text(offsets, graph_image, *args)\n\n paste_center(host_image, graph_image)\n\n return host_image, nodes, edges, offsets"
},
{
"identifier": "get_node_positions",
"path": "src/graph_stuff.py",
"snippet": "def get_node_positions(graph):\n \"\"\"Draw a graph to a file, load it, then parse it's `node[pos] values, and return them\"\"\"\n temp_filename = f\"{get_cache_dir()}/graph_order\"\n graph.filename = temp_filename\n graph.render(view=False)\n file_contents = open(f\"{temp_filename}.plain\").read()\n lines = file_contents.split(\"\\n\")\n\n nodes = {}\n\n for line in lines:\n tokens = line.split(\" \")\n if tokens[0] != \"node\":\n continue\n _, node, x, y = tokens[0], tokens[1], tokens[2], tokens[3]\n node = node.replace('\"', \"\")\n nodes[node] = f\"{x},{y}!\"\n\n return nodes"
},
{
"identifier": "get_note_start_times_in_frames",
"path": "src/midi_stuff.py",
"snippet": "def get_note_start_times_in_frames(\n midi_file_path,\n fps,\n squash_tracks=False,\n group_notes_by_track=False,\n):\n # Load the MIDI file\n midi_data = pretty_midi.PrettyMIDI(midi_file_path)\n\n track_events_frames = defaultdict(lambda: defaultdict(list))\n\n for i, instrument in enumerate(midi_data.instruments, start=1):\n for note in instrument.notes:\n # Calculate the start time of the note in seconds and convert to frames\n start_time = note.start\n end_time = note.end\n frame = int(start_time * fps)\n note_length_in_frames = int((end_time - start_time) * fps)\n\n track_name = 0\n if group_notes_by_track:\n track_name = i + 1\n note_value = get_note(track_name, note.pitch)\n\n note_tuple = (\n note_value,\n note.velocity,\n note_length_in_frames,\n )\n if squash_tracks:\n track_events_frames[f\"track_2\"][frame].append(note_tuple)\n else:\n track_events_frames[f\"track_{track_name}\"][frame].append(note_tuple)\n\n return track_events_frames"
},
{
"identifier": "TRACK_NOTE_DELIMITER",
"path": "src/midi_stuff.py",
"snippet": "TRACK_NOTE_DELIMITER = \"#\""
},
{
"identifier": "Theme",
"path": "src/theme_stuff.py",
"snippet": "class Theme:\n def __init__(\n self,\n theme_file,\n defaults_file,\n ):\n with open(theme_file, \"r\") as stream:\n self._theme = AttributeDict(**yaml.safe_load(stream))\n\n with open(defaults_file, \"r\") as stream:\n try:\n self._defaults = AttributeDict(**yaml.safe_load(stream))\n except:\n self._defaults = AttributeDict(**{})\n\n def _get_value(self, path, default_path=\"\"):\n value = self._theme.get_path(path)\n if value is not None:\n return value\n if default_path:\n theme_default = self._theme.get_path(default_path)\n if theme_default:\n return theme_default\n return self._defaults.get_path(default_path)\n\n @property\n def debug_show_base_image(self):\n path = \"debug.show_base_image\"\n return self._get_value(path, path)\n\n @property\n def debug_max_frames(self):\n path = \"debug.max_frames\"\n return self._get_value(path, path)\n\n @property\n def frame_rate(self):\n path = \"frame_rate\"\n return self._get_value(path, path)\n\n @property\n def graphviz_engine(self):\n path = \"graphviz_engine\"\n return self._get_value(path, path)\n\n @property\n def squash_tracks(self):\n path = \"squash_tracks\"\n return self._get_value(path, path)\n\n def skip_track(self, track):\n if track == \"track_1\":\n return True\n return self._get_value(\n f\"tracks.{track}.skip\",\n default_path=f\"tracks.default.skip\",\n )\n\n def pulses_only(self, track):\n return self._get_value(\n f\"tracks.{track}.pulses_only\",\n default_path=f\"tracks.default.pulses_only\",\n )\n\n def allow_self_notes(self, track):\n return self._get_value(\n f\"tracks.{track}.allow_self_notes\",\n default_path=f\"tracks.default.skip\",\n )\n\n @property\n def graphviz_edge_attrs(self):\n path = \"graphviz_edge_attrs\"\n return self._get_value(path, path)\n\n @property\n def graphviz_node_attrs(self):\n path = \"graphviz_node_attrs\"\n return self._get_value(path, path)\n\n @property\n def graphviz_graph_attrs(self):\n path = \"graphviz_graph_attrs\"\n return self._get_value(path, path)\n\n @property\n def nodes_sorted(self):\n path = \"nodes_sorted\"\n return self._get_value(path, path)\n\n @property\n def background_image(self):\n path = \"background_image\"\n return self._get_value(path, path)\n\n @property\n def background_color(self):\n path = \"background_color\"\n return self._get_value(path, path)\n\n @property\n def font(self):\n path = \"font\"\n return self._get_value(path, path)\n\n @property\n def hide_letters(self):\n path = \"hide_letters\"\n return self._get_value(path, path)\n\n @property\n def group_notes_by_track(self):\n path = \"group_notes_by_track\"\n return self._get_value(path, path)\n\n @property\n def width(self):\n path = \"width\"\n return self._get_value(path, path)\n\n @property\n def height(self):\n path = \"height\"\n return self._get_value(path, path)\n\n @property\n def show_lines(self):\n path = \"show_graph_lines\"\n return self._get_value(path, path)\n\n @property\n def graph_line_width(self):\n path = \"graph_line_width\"\n return self._get_value(path, path)\n\n @property\n def graph_line_blur(self):\n path = \"graph_line_blur\"\n return self._get_value(path, path)\n\n @property\n def graph_line_color(self):\n path = \"graph_line_color\"\n return self._get_value(path, path)\n\n @property\n def font_size(self):\n path = \"font_size\"\n return self._get_value(path, path)\n\n @property\n def node_outline_color(self):\n path = \"node.outline_color\"\n return self._get_value(path, path)\n\n @property\n def node_fill_color(self):\n path = \"node.fill_color\"\n return 
self._get_value(path, path)\n\n @property\n def node_text_color(self):\n path = \"node.text.color\"\n return self._get_value(path, path)\n\n @property\n def node_text_outline_color(self):\n path = \"node.text.stroke_color\"\n return self._get_value(path, path)\n\n @property\n def node_text_stroke_width(self):\n path = \"node.text.stroke_width\"\n return self._get_value(path, path)\n\n @property\n def dpi(self):\n path = \"dpi\"\n return self._get_value(path, path)\n\n @property\n def text_location_offsets(self):\n path = \"text_location_offsets\"\n return self._get_value(path, path)\n\n @property\n def node_shadow_color(self):\n path = \"node.shadow_color\"\n return self._get_value(path, path)\n\n @property\n def node_shadow_size(self):\n path = \"node.shadow_size\"\n return self._get_value(path, path) / 100\n\n @property\n def tracks(self):\n return list(self._theme.tracks.keys())\n\n def note_num_frames(self, track):\n a = self._get_value(\n f\"tracks.{track}.note.num_frames\",\n default_path=f\"tracks.default.note.num_frames\",\n )\n return a\n\n def note_color(self, track):\n return self._get_value(\n f\"tracks.{track}.note.color\",\n default_path=f\"tracks.default.note.color\",\n )\n\n def note_stroke_width(self, track):\n return self._get_value(\n f\"tracks.{track}.note.stroke_width\",\n default_path=f\"tracks.default.note.stroke_width\",\n )\n\n def note_increase_size(self, track):\n return (\n self._get_value(\n f\"tracks.{track}.note.increase_size\",\n default_path=f\"tracks.default.note.increase_size\",\n )\n / 100\n )\n\n def chord_line_width(self, track):\n return self._get_value(\n f\"tracks.{track}.chord_line.width\",\n default_path=f\"tracks.default.chord_line.width\",\n )\n\n def chord_line_border_color(self, track):\n return self._get_value(\n f\"tracks.{track}.chord_line.border_color\",\n default_path=f\"tracks.default.chord_line.border_color\",\n )\n\n def chord_line_color(self, track):\n return self._get_value(\n f\"tracks.{track}.chord_line.color\",\n default_path=f\"tracks.default.chord_line.color\",\n )\n\n def ball_radius(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.radius\",\n default_path=f\"tracks.default.ball.radius\",\n )\n\n def ball_g_blur_max(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.g_blur_max\",\n default_path=f\"tracks.default.ball.g_blur_max\",\n )\n\n def ball_color(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.color\",\n default_path=f\"tracks.default.ball.color\",\n )\n\n def ball_stroke_color(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.stroke_color\",\n default_path=f\"tracks.default.ball.stroke_color\",\n )\n\n def ball_stroke_width(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.stroke_width\",\n default_path=f\"tracks.default.ball.stroke_width\",\n )"
},
{
"identifier": "add_frame_to_video",
"path": "src/video_stuff.py",
"snippet": "def add_frame_to_video(writer, frame):\n writer.append_data(np.array(frame))"
},
{
"identifier": "finalize_video_with_music",
"path": "src/video_stuff.py",
"snippet": "def finalize_video_with_music(\n writer,\n video_file_path,\n output_file_name,\n midi_file_path,\n frame_rate,\n soundfont_file,\n frames_written,\n):\n writer.close() # Ensure the writer is closed\n\n # Audio processing\n temp_music_file = os.path.join(get_cache_dir(), \"temp_music.wav\")\n open(temp_music_file, \"ab\").close()\n click.echo(\"Converting midi to wave...\")\n convert_midi_to_wav(\n midi_file_path,\n temp_music_file,\n soundfont_file,\n )\n audio_clip = AudioSegment.from_file(temp_music_file)\n\n audio_duration = int(\n (frames_written / frame_rate) * 1000\n ) # Duration in milliseconds\n audio_clip = audio_clip[:audio_duration] # Truncate the audio\n\n temp_audio = f\"{get_cache_dir()}/music.wav\"\n audio_clip.export(temp_audio, format=\"wav\")\n\n final_video = VideoFileClip(video_file_path)\n final_video_audio = AudioFileClip(temp_audio)\n final_video = final_video.set_audio(final_video_audio)\n\n timestamp = int(time.time())\n final_output_path = f\"{output_file_name}_{timestamp}.mp4\"\n final_video.write_videofile(final_output_path, codec=\"libx264\", audio_codec=\"aac\")\n\n cleanup_cache_dir(get_cache_dir())\n\n return final_output_path"
},
{
"identifier": "initialize_video_writer",
"path": "src/video_stuff.py",
"snippet": "@contextmanager\ndef initialize_video_writer(frame_rate):\n video_file_path = f\"{get_cache_dir()}/video.mp4\"\n writer = imageio.get_writer(video_file_path, fps=frame_rate)\n try:\n yield writer, video_file_path\n finally:\n writer.close()"
}
] | import os
import click
import psutil
from graphviz import Graph
from hurry.filesize import size
from concurrent.futures import ThreadPoolExecutor, as_completed
from src.animation_stuff import AnimationFrames
from src.cache_stuff import (
cleanup_cache_dir,
get_cache_dir,
)
from src.graph_stuff import (
animate_bezier_point,
animate_ellipsis_blur,
draw_fading_bezier_curve,
parse_graph,
get_node_positions,
)
from src.midi_stuff import (
get_note_start_times_in_frames,
TRACK_NOTE_DELIMITER,
)
from src.theme_stuff import Theme
from src.video_stuff import (
add_frame_to_video,
finalize_video_with_music,
initialize_video_writer,
) | 7,292 | def create_graphviz_sorted(theme, track_events_frames):
"""
This function implements a hack to force Graphviz node ordering.
Step 1: Create a bare-bones CIRCO graph with nodes added in order
Step 2: Save that graph to a file, and extract its node positions
Step 3: Generate the final NEATO graph, using hard coded node positions
"""
if theme.graphviz_engine.lower() != "circo":
click.echo(
"ERROR: Node sorting only works when graphviz engine is circo", err=True
)
cleanup_cache_dir(get_cache_dir())
exit(1)
song_graph = Graph(
"G",
engine=theme.graphviz_engine,
format="plain",
strict=True,
node_attr=theme.graphviz_node_attrs,
graph_attr=theme.graphviz_graph_attrs,
edge_attr=theme.graphviz_edge_attrs,
)
all_notes = {}
for track, note_tuples in track_events_frames.items():
if theme.skip_track(track):
continue
notes = [
[note_tuple[0] for note_tuple in list_of_note_tuples]
for frame_num, list_of_note_tuples in note_tuples.items()
]
for note in notes:
all_notes[note[0]] = True
# Create Nodes - In order
prev_note = None
all_notes = list(all_notes.keys())
if theme.nodes_sorted:
if isinstance(theme.nodes_sorted, bool):
all_notes = sorted(
all_notes, key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1])
)
else:
all_notes = filter_and_order_custom(theme.nodes_sorted, all_notes)
for n in all_notes + [all_notes[0]]: # tack on the first to make a circle
song_graph.node(n, label=midi_note_to_pitch_class(n))
if prev_note:
song_graph.edge(n, prev_note)
prev_note = n
node_positions = get_node_positions(song_graph)
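    # Step 3: rebuild the graph with the NEATO engine, pinning every node to the
    # position that the CIRCO layout produced above.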
song_graph = Graph(
"G",
engine="neato",
format="xdot",
strict=True,
node_attr=theme.graphviz_node_attrs,
graph_attr=theme.graphviz_graph_attrs,
edge_attr=theme.graphviz_edge_attrs,
)
for track, note_tuples in track_events_frames.items():
if theme.skip_track(track):
continue
notes = [
[note_tuple[0] for note_tuple in list_of_note_tuples]
for frame_num, list_of_note_tuples in note_tuples.items()
]
# Create Nodes
for note in notes:
n = note[0]
song_graph.node(
n,
label=midi_note_to_pitch_class(n),
_attributes={"pos": node_positions[n]},
)
# Create Edges
melody_pairs = overlapping_pairs(notes)
for a_notes, b_notes in melody_pairs:
for a in a_notes:
for b in b_notes:
song_graph.node(b, label=midi_note_to_pitch_class(b))
song_graph.edge(a, b)
return song_graph
def create_graphviz(theme, track_events_frames):
if theme.nodes_sorted:
return create_graphviz_sorted(theme, track_events_frames)
return create_graphviz_default_sort(theme, track_events_frames)
def process_frame(current_frame, base_image, theme, offsets, FRAMES):
frame_result = base_image.copy()
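    # Composite the layers in key order; each layer's draw closure paints on top
    # of the result produced by the layers before it.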
for layer, layer_images in sorted(FRAMES.items()):
frame = layer_images[current_frame]
if frame:
draw_function, args = frame
frame_result = draw_function(
base_image=frame_result, # Use frame_result instead of base_image
theme=theme,
offsets=offsets,
**args,
)
return frame_result
def generate_music_graph(
midi_file_path,
default_theme_file_path,
theme_file_path,
output_path,
soundfont_file,
):
|
def midi_note_to_pitch_class(midi_note):
_, note = midi_note.split(TRACK_NOTE_DELIMITER)
midi_note = int(note)
note_names = ["C", "Db", "D", "Eb", "E", "F", "F#", "G", "Ab", "A", "Bb", "B"]
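    # Note numbers are treated as 1-based here, so a value of 1 maps to "C".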
return note_names[(midi_note - 1) % 12]
def overlapping_pairs(lst):
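    # Pair each element with its successor and wrap the last back around to the
    # first; lists with fewer than two elements yield no pairs.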
return list(zip(lst, lst[1:])) + [(lst[-1], lst[0])] if len(lst) > 1 else []
def create_graphviz_default_sort(theme, track_events_frames):
"""Create a Graphviz without a specified order"""
song_graph = Graph(
"G",
engine=theme.graphviz_engine,
format="xdot",
strict=True,
node_attr=theme.graphviz_node_attrs,
graph_attr=theme.graphviz_graph_attrs,
edge_attr=theme.graphviz_edge_attrs,
)
for track, note_tuples in track_events_frames.items():
if theme.skip_track(track):
continue
notes = [
[note_tuple[0] for note_tuple in list_of_note_tuples]
for frame_num, list_of_note_tuples in note_tuples.items()
]
# Create Nodes
for note in notes:
n = note[0]
song_graph.node(n, label=midi_note_to_pitch_class(n))
# Create Edges
melody_pairs = overlapping_pairs(notes)
for a_notes, b_notes in melody_pairs:
for a in a_notes:
for b in b_notes:
song_graph.node(b, label=midi_note_to_pitch_class(b))
song_graph.edge(a, b)
return song_graph
def filter_and_order_custom(reference_list, input_list):
# Extract the numbers from the input strings and convert them to integers
input_numbers = [int(item.split(TRACK_NOTE_DELIMITER)[1]) for item in input_list]
# Create a mapping of number to original string for reconstruction later
number_to_string = dict(zip(input_numbers, input_list))
# Filter and order the input list based on the reference list
ordered_list = [
number_to_string[item] for item in reference_list if item in number_to_string
]
return ordered_list
def create_graphviz_sorted(theme, track_events_frames):
"""
This function implements a hack to force Graphviz node ordering.
Step 1: Create a bare-bones CIRCO graph with nodes added in order
Step 2: Save that graph to a file, and extract its node positions
Step 3: Generate the final NEATO graph, using hard coded node positions
"""
if theme.graphviz_engine.lower() != "circo":
click.echo(
"ERROR: Node sorting only works when graphviz engine is circo", err=True
)
cleanup_cache_dir(get_cache_dir())
exit(1)
song_graph = Graph(
"G",
engine=theme.graphviz_engine,
format="plain",
strict=True,
node_attr=theme.graphviz_node_attrs,
graph_attr=theme.graphviz_graph_attrs,
edge_attr=theme.graphviz_edge_attrs,
)
all_notes = {}
for track, note_tuples in track_events_frames.items():
if theme.skip_track(track):
continue
notes = [
[note_tuple[0] for note_tuple in list_of_note_tuples]
for frame_num, list_of_note_tuples in note_tuples.items()
]
for note in notes:
all_notes[note[0]] = True
# Create Nodes - In order
prev_note = None
all_notes = list(all_notes.keys())
if theme.nodes_sorted:
if isinstance(theme.nodes_sorted, bool):
all_notes = sorted(
all_notes, key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1])
)
else:
all_notes = filter_and_order_custom(theme.nodes_sorted, all_notes)
for n in all_notes + [all_notes[0]]: # tack on the first to make a circle
song_graph.node(n, label=midi_note_to_pitch_class(n))
if prev_note:
song_graph.edge(n, prev_note)
prev_note = n
node_positions = get_node_positions(song_graph)
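    # Step 3: rebuild the graph with the NEATO engine, pinning every node to the
    # position that the CIRCO layout produced above.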
song_graph = Graph(
"G",
engine="neato",
format="xdot",
strict=True,
node_attr=theme.graphviz_node_attrs,
graph_attr=theme.graphviz_graph_attrs,
edge_attr=theme.graphviz_edge_attrs,
)
for track, note_tuples in track_events_frames.items():
if theme.skip_track(track):
continue
notes = [
[note_tuple[0] for note_tuple in list_of_note_tuples]
for frame_num, list_of_note_tuples in note_tuples.items()
]
# Create Nodes
for note in notes:
n = note[0]
song_graph.node(
n,
label=midi_note_to_pitch_class(n),
_attributes={"pos": node_positions[n]},
)
# Create Edges
melody_pairs = overlapping_pairs(notes)
for a_notes, b_notes in melody_pairs:
for a in a_notes:
for b in b_notes:
song_graph.node(b, label=midi_note_to_pitch_class(b))
song_graph.edge(a, b)
return song_graph
def create_graphviz(theme, track_events_frames):
if theme.nodes_sorted:
return create_graphviz_sorted(theme, track_events_frames)
return create_graphviz_default_sort(theme, track_events_frames)
def process_frame(current_frame, base_image, theme, offsets, FRAMES):
frame_result = base_image.copy()
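    # Composite the layers in key order; each layer's draw closure paints on top
    # of the result produced by the layers before it.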
for layer, layer_images in sorted(FRAMES.items()):
frame = layer_images[current_frame]
if frame:
draw_function, args = frame
frame_result = draw_function(
base_image=frame_result, # Use frame_result instead of base_image
theme=theme,
offsets=offsets,
**args,
)
return frame_result
def generate_music_graph(
midi_file_path,
default_theme_file_path,
theme_file_path,
output_path,
soundfont_file,
): | theme = Theme(theme_file_path, default_theme_file_path) | 10 | 2023-11-17 17:56:04+00:00 | 12k |
dazhangyu123/ACMIL | Step3_WSI_classification.py | [
{
"identifier": "save_model",
"path": "utils/utils.py",
"snippet": "def save_model(conf, epoch, model, optimizer, is_best=False, is_last=False):\n to_save = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch,\n 'config': conf,\n }\n\n checkpoint_path = os.path.join(conf.ckpt_dir, 'checkpoint-%s.pth' % epoch)\n\n # record the checkpoint with best validation accuracy\n if is_best:\n checkpoint_path = os.path.join(conf.ckpt_dir, 'checkpoint-best.pth')\n\n if is_last:\n checkpoint_path = os.path.join(conf.ckpt_dir, 'checkpoint-last.pth')\n\n torch.save(to_save, checkpoint_path)"
},
{
"identifier": "Struct",
"path": "utils/utils.py",
"snippet": "class Struct:\n def __init__(self, **entries):\n self.__dict__.update(entries)"
},
{
"identifier": "set_seed",
"path": "utils/utils.py",
"snippet": "def set_seed(seed):\n # Set random seed for PyTorch\n torch.manual_seed(seed)\n\n # Set random seed for CUDA if available\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n # Set random seed for NumPy\n np.random.seed(seed)\n\n # Set random seed for random module\n random.seed(seed)\n\n # Set random seed for CuDNN if available\n if torch.backends.cudnn.enabled:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False"
},
{
"identifier": "Wandb_Writer",
"path": "utils/utils.py",
"snippet": "class Wandb_Writer(object):\n\n def __init__(self, project_name='wsi_classification', group_name='baseline', mode='online', name=0):\n self.wandb = wandb.init(project=project_name, group=group_name, entity=\"dazhangyu123\", save_code=True, mode=mode, name='seed%d'%name)\n\n def log(self, var_name, var, commit=True):\n self.wandb.log({var_name: var}, commit=commit)\n\n def summary(self, var_name, var):\n self.wandb.run.summary[var_name] = var"
},
{
"identifier": "build_HDF5_feat_dataset",
"path": "datasets/datasets.py",
"snippet": "def build_HDF5_feat_dataset(file_path, conf):\r\n if conf.dataset == 'camelyon':\r\n train_split, train_names, val_split, val_names, test_split, test_names = split_dataset_camelyon(file_path, conf)\r\n train_split, train_names = generate_fewshot_dataset(train_split, train_names, num_shots=conf.n_shot)\r\n return HDF5_feat_dataset2(train_split, train_names), HDF5_feat_dataset2(val_split, val_names), HDF5_feat_dataset2(test_split, test_names)\r\n elif conf.dataset == 'bracs':\r\n train_split, train_names, val_split, val_names, test_split, test_names = split_dataset_bracs(file_path, conf)\r\n train_split, train_names = generate_fewshot_dataset(train_split, train_names, num_shots=conf.n_shot)\r\n return HDF5_feat_dataset2(train_split, train_names), HDF5_feat_dataset2(val_split, val_names), HDF5_feat_dataset2(test_split, test_names)\r\n elif conf.dataset == 'lct':\r\n train_split, train_names, val_split, val_names, test_split, test_names = split_dataset_lct(file_path, conf)\r\n # save_dir = 'splits/%s' % conf.dataset\r\n # os.makedirs(save_dir, exist_ok=True)\r\n # json.dump({'train_names': train_names, 'val_names': val_names, 'test_names': test_names},\r\n # open(os.path.join(save_dir, 'split_%s.json' % conf.seed), 'w'))\r\n # sys.exit()\r\n train_split, train_names = generate_fewshot_dataset(train_split, train_names, num_shots=conf.n_shot)\r\n return HDF5_feat_dataset2(train_split, train_names), HDF5_feat_dataset2(val_split, val_names), HDF5_feat_dataset2(test_split, test_names)\r"
},
{
"identifier": "TransformWrapper",
"path": "architecture/transformer.py",
"snippet": "class TransformWrapper(nn.Module):\n def __init__(self, conf):\n super(TransformWrapper, self).__init__()\n self.dimreduction = DimReduction(conf.D_feat, conf.D_inner)\n self.sub_attention = nn.ModuleList()\n for i in range(conf.n_token):\n self.sub_attention.append(MutiHeadAttention(conf.D_inner, 8, n_masked_patch=conf.n_masked_patch, mask_drop=conf.mask_drop))\n self.bag_attention = MutiHeadAttention1(conf.D_inner, 8)\n self.q = nn.Parameter(torch.zeros((1, conf.n_token, conf.D_inner)))\n nn.init.normal_(self.q, std=1e-6)\n self.n_class = conf.n_class\n\n self.classifier = nn.ModuleList()\n for i in range(conf.n_token):\n self.classifier.append(Classifier_1fc(conf.D_inner, conf.n_class, 0.0))\n self.n_token = conf.n_token\n self.Slide_classifier = Classifier_1fc(conf.D_inner, conf.n_class, 0.0)\n\n def forward(self, input, use_attention_mask=True):\n input = self.dimreduction(input)\n q = self.q\n k = input\n v = input\n outputs = []\n attns = []\n for i in range(self.n_token):\n feat_i, attn_i = self.sub_attention[i](q[:, i].unsqueeze(0), k, v, use_attention_mask=use_attention_mask)\n outputs.append(self.classifier[i](feat_i))\n attns.append(attn_i)\n\n attns = torch.cat(attns, 1)\n feat_bag = self.bag_attention(v, attns.softmax(dim=-1).mean(1, keepdim=True))\n\n return torch.cat(outputs, dim=0), self.Slide_classifier(feat_bag), attns"
},
{
"identifier": "AttnMIL",
"path": "architecture/transformer.py",
"snippet": "class AttnMIL(nn.Module):\n def __init__(self, conf, D=128, droprate=0):\n super(AttnMIL, self).__init__()\n self.dimreduction = DimReduction(conf.feat_d, conf.D_inner)\n self.attention = Attention_Gated(conf.D_inner, D, 1)\n self.classifier = Classifier_1fc(conf.D_inner, conf.n_class, droprate)\n\n def forward(self, x): ## x: N x L\n x = x[0]\n med_feat = self.dimreduction(x)\n A = self.attention(med_feat) ## K x N\n\n A_out = A\n A = F.softmax(A, dim=1) # softmax over N\n afeat = torch.mm(A, med_feat) ## K x L\n outputs = self.classifier(afeat)\n return outputs, A_out.unsqueeze(0)"
},
{
"identifier": "TransMIL",
"path": "architecture/transMIL.py",
"snippet": "class TransMIL(nn.Module):\r\n def __init__(self, conf):\r\n super(TransMIL, self).__init__()\r\n self.pos_layer = PPEG(dim=conf.D_inner)\r\n self._fc1 = nn.Sequential(nn.Linear(conf.D_feat, conf.D_inner), nn.ReLU())\r\n self.cls_token = nn.Parameter(torch.randn(1, 1, conf.D_inner))\r\n self.n_classes = conf.n_class\r\n self.layer1 = TransLayer(dim=conf.D_inner)\r\n self.layer2 = TransLayer(dim=conf.D_inner)\r\n self.norm = nn.LayerNorm(conf.D_inner)\r\n self._fc2 = nn.Linear(conf.D_inner, conf.n_class)\r\n\r\n def forward(self, input):\r\n h = self._fc1(input) # [B, n, 512]\r\n\r\n # ---->pad\r\n H = h.shape[1]\r\n _H, _W = int(np.ceil(np.sqrt(H))), int(np.ceil(np.sqrt(H)))\r\n add_length = _H * _W - H\r\n h = torch.cat([h, h[:, :add_length, :]], dim=1) # [B, N, 512]\r\n\r\n # ---->cls_token\r\n B = h.shape[0]\r\n cls_tokens = self.cls_token.expand(B, -1, -1).cuda()\r\n h = torch.cat((cls_tokens, h), dim=1)\r\n\r\n # ---->Translayer x1\r\n h = self.layer1(h) # [B, N, 512]\r\n\r\n # ---->PPEG\r\n h = self.pos_layer(h, _H, _W) # [B, N, 512]\r\n\r\n # ---->Translayer x2\r\n h = self.layer2(h) # [B, N, 512]\r\n\r\n # ---->cls_token\r\n h = self.norm(h)[:, 0]\r\n\r\n # ---->predict\r\n logits = self._fc2(h) # [B, n_classes]\r\n # Y_hat = torch.argmax(logits, dim=1)\r\n # Y_prob = F.softmax(logits, dim=1)\r\n # results_dict = {'logits': logits, 'Y_prob': Y_prob, 'Y_hat': Y_hat}\r\n return logits\r"
},
{
"identifier": "train_one_epoch",
"path": "engine.py",
"snippet": "def train_one_epoch(net, criterion, data_loader, optimizer, device, epoch, conf, log_writer=None):\r\n \"\"\"\r\n Trains the given network for one epoch according to given criterions (loss functions)\r\n \"\"\"\r\n\r\n # Set the network to training mode\r\n net.train()\r\n metric_logger = MetricLogger(delimiter=\" \")\r\n metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))\r\n header = 'Epoch: [{}]'.format(epoch)\r\n print_freq = 100\r\n\r\n for data_it, data in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\r\n # for data_it, data in enumerate(data_loader, start=epoch * len(data_loader)):\r\n # Move input batch onto GPU if eager execution is enabled (default), else leave it on CPU\r\n # Data is a dict with keys `input` (patches) and `{task_name}` (labels for given task)\r\n image_patches = data['input'].to(device, dtype=torch.float32)\r\n labels = data['label'].to(device)\r\n coords = data['coords']\r\n\r\n # # Calculate and set new learning rate\r\n adjust_learning_rate(optimizer, epoch + data_it / len(data_loader), conf)\r\n optimizer.zero_grad()\r\n\r\n if conf.arch == 'dsmil':\r\n loss_forward_and_backward_dsmil(net, image_patches, labels, criterion, conf,\r\n device, optimizer, metric_logger, log_writer)\r\n elif conf.arch in ('clam_sb', 'clam_mb'):\r\n loss_forward_and_backward_clam(net, image_patches, labels, criterion, conf,\r\n device, optimizer, metric_logger, log_writer)\r\n elif conf.arch == 'bmil_spvis':\r\n loss_forward_and_backward_bmil(net, image_patches, coords, labels, criterion, conf,\r\n device, optimizer, metric_logger, log_writer)\r\n else:\r\n loss_forward_and_backward(net, image_patches, labels, criterion, conf,\r\n device, optimizer, metric_logger, log_writer)\r\n\r\n optimizer.step()\r"
},
{
"identifier": "evaluate",
"path": "engine.py",
"snippet": "@torch.no_grad()\r\ndef evaluate(net, criterion, data_loader, device, conf, header):\r\n # Set the network to evaluation mode\r\n net.eval()\r\n\r\n y_pred = []\r\n y_true = []\r\n\r\n metric_logger = MetricLogger(delimiter=\" \")\r\n\r\n for data in metric_logger.log_every(data_loader, 100, header):\r\n image_patches = data['input'].to(device, dtype=torch.float32)\r\n labels = data['label'].to(device)\r\n coords = data['coords']\r\n\r\n if conf.arch == 'dsmil':\r\n # Compute loss\r\n ins_preds, bag_preds, attn = net(image_patches)\r\n max_preds, _ = torch.max(ins_preds, 0, keepdim=True)\r\n loss = 0.5 * criterion(max_preds, labels) \\\r\n + 0.5 * criterion(bag_preds, labels)\r\n pred = 0.5 * torch.softmax(max_preds, dim=-1) \\\r\n + 0.5 * torch.softmax(bag_preds, dim=-1)\r\n elif conf.arch == 'bmil_spvis':\r\n coords_array = coords.numpy()[0]\r\n output, Y_prob, Y_hat, _, _ = net(image_patches, coords_array, coords_array[:, 1].max(),\r\n coords_array[:, 0].max(), validation=True)\r\n loss = criterion(output, labels)\r\n pred = torch.softmax(output, dim=-1)\r\n elif conf.arch in ('clam_sb', 'clam_mb'):\r\n output = net(image_patches)\r\n loss = criterion(output, labels)\r\n pred = torch.softmax(output, dim=-1)\r\n else:\r\n # Compute loss\r\n output = net(image_patches)\r\n loss = criterion(output, labels)\r\n pred = torch.softmax(output, dim=-1)\r\n\r\n acc1 = accuracy(pred, labels, topk=(1,))[0]\r\n\r\n metric_logger.update(loss=loss.item())\r\n metric_logger.meters['acc1'].update(acc1.item(), n=labels.shape[0])\r\n\r\n y_pred.append(pred)\r\n y_true.append(labels)\r\n\r\n y_pred = torch.cat(y_pred, dim=0)\r\n y_true = torch.cat(y_true, dim=0)\r\n\r\n AUROC_metric = torchmetrics.AUROC(num_classes=conf.n_class, average='macro').to(device)\r\n AUROC_metric(y_pred, y_true)\r\n auroc = AUROC_metric.compute().item()\r\n F1_metric = torchmetrics.F1Score(num_classes=conf.n_class, average='macro').to(device)\r\n F1_metric(y_pred, y_true)\r\n f1_score = F1_metric.compute().item()\r\n\r\n print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f} auroc {AUROC:.3f} f1_score {F1:.3f}'\r\n .format(top1=metric_logger.acc1, losses=metric_logger.loss, AUROC=auroc, F1=f1_score))\r\n\r\n return auroc, metric_logger.acc1.global_avg, f1_score, metric_logger.loss.global_avg\r"
},
{
"identifier": "MILNet",
"path": "architecture/dsmil.py",
"snippet": "class MILNet(nn.Module):\r\n def __init__(self, i_classifier, b_classifier):\r\n super(MILNet, self).__init__()\r\n self.i_classifier = i_classifier\r\n self.b_classifier = b_classifier\r\n\r\n def forward(self, x, is_train=True):\r\n feats, classes = self.i_classifier(x[0])\r\n # print(feats)\r\n prediction_bag, A, B = self.b_classifier(feats, classes, is_train=is_train)\r\n return classes, prediction_bag, A\r"
},
{
"identifier": "FCLayer",
"path": "architecture/dsmil.py",
"snippet": "class FCLayer(nn.Module):\r\n def __init__(self, in_size, out_size=1):\r\n super(FCLayer, self).__init__()\r\n self.fc = nn.Sequential(nn.Linear(in_size, out_size))\r\n\r\n def forward(self, feats):\r\n x = self.fc(feats)\r\n return feats, x\r"
},
{
"identifier": "BClassifier",
"path": "architecture/dsmil.py",
"snippet": "class BClassifier(nn.Module):\r\n def __init__(self, conf, dropout_v=0.0, nonlinear=True, passing_v=False,\r\n confounder_path=False): # K, L, N\r\n super(BClassifier, self).__init__()\r\n self.n_masked_patch = conf.n_masked_patch\r\n input_size=conf.D_feat\r\n output_class=conf.n_class\r\n if nonlinear:\r\n self.q = nn.Sequential(nn.Linear(input_size, conf.D_inner), nn.ReLU(), nn.Linear(conf.D_inner, 128), nn.Tanh())\r\n else:\r\n self.q = nn.Linear(input_size, conf.D_inner)\r\n if passing_v:\r\n self.v = nn.Sequential(\r\n nn.Dropout(dropout_v),\r\n nn.Linear(input_size, input_size),\r\n nn.ReLU()\r\n )\r\n else:\r\n self.v = nn.Identity()\r\n\r\n ### 1D convolutional layer that can handle multiple class (including binary)\r\n self.fcc = nn.Conv1d(output_class, output_class, kernel_size=input_size)\r\n self.confounder_path = None\r\n if confounder_path:\r\n self.confounder_path = confounder_path\r\n conf_list = []\r\n for i in confounder_path:\r\n conf_list.append(torch.from_numpy(np.load(i)).float())\r\n conf_tensor = torch.cat(conf_list,\r\n 0) # [ k, C, K] k-means, c classes , K-dimension, should concatenate at centers k\r\n conf_tensor_dim = conf_tensor.shape[-1]\r\n self.register_buffer(\"confounder_feat\", conf_tensor)\r\n joint_space_dim = 128\r\n dropout_v = 0.1\r\n self.confounder_W_q = nn.Linear(input_size, joint_space_dim)\r\n self.confounder_W_k = nn.Linear(conf_tensor_dim, joint_space_dim)\r\n self.fcc = nn.Conv1d(output_class, output_class, kernel_size=input_size + conf_tensor_dim)\r\n # self.classifier = nn.Linear(self.L*self.K+in_size, out_size)\r\n self.dropout = nn.Dropout(dropout_v)\r\n\r\n\r\n def forward(self, feats, c, is_train=True): # N x K, N x C\r\n device = feats.device\r\n V = self.v(feats) # N x V, unsorted\r\n Q = self.q(feats).view(feats.shape[0], -1) # N x Q, unsorted\r\n # handle multiple classes without for loop\r\n _, m_indices = torch.sort(c, 0,\r\n descending=True) # sort class scores along the instance dimension, m_indices in shape N x C\r\n # print(m_indices.shape)\r\n m_feats = torch.index_select(feats, dim=0,\r\n index=m_indices[0, :]) # select critical instances, m_feats in shape C x K\r\n q_max = self.q(m_feats) # compute queries of critical instances, q_max in shape C x Q\r\n A = torch.mm(Q, q_max.transpose(0,\r\n 1)) # compute inner product of Q to each entry of q_max, A in shape N x C, each column contains unnormalized attention scores\r\n A = A / torch.sqrt(torch.tensor(Q.shape[1], dtype=torch.float32, device=device)) # normalize attention scores, A in shape N x C,\r\n A = A.transpose(0, 1)\r\n\r\n if self.n_masked_patch > 0 and is_train:\r\n # Get the indices of the top-k largest values\r\n q, c = A.shape\r\n n_masked_patch = min(self.n_masked_patch, c)\r\n _, indices = torch.topk(A, n_masked_patch, dim=-1)\r\n indices = indices.reshape(q, -1)\r\n rand_selected = torch.argsort(torch.rand(*indices.shape), dim=-1)[:, :int(n_masked_patch * 0.5)]\r\n masked_indices = indices[torch.arange(indices.shape[0]).unsqueeze(-1), rand_selected]\r\n random_mask = torch.ones(q, c).to(A.device)\r\n random_mask.scatter_(-1, masked_indices, 0)\r\n A = A.masked_fill(random_mask.reshape(q, -1) == 0, -1e9)\r\n\r\n A_out = A\r\n A = F.softmax(A, dim=-1)\r\n B = torch.mm(A, V) # compute bag representation, B in shape C x V\r\n B = B.view(1, B.shape[0], B.shape[1]) # 1 x C x V\r\n # cls-specific confounder\r\n if self.confounder_path:\r\n if 'agnostic' in self.confounder_path[0]:\r\n device = B.device\r\n bag_q = self.confounder_W_q(B.squeeze(0)) # bs x 
C x V -- C x V\r\n conf_k = self.confounder_W_k(self.dropout(self.confounder_feat)) # k x V\r\n A = torch.mm(conf_k, bag_q.transpose(0, 1)) # k * C\r\n A = F.softmax(A / torch.sqrt(torch.tensor(conf_k.shape[1], dtype=torch.float32, device=device)),\r\n 0) # normalize attention scores, A in shape N x C,\r\n conf_feats = torch.mm(A.transpose(0, 1),\r\n self.confounder_feat) # compute bag representation, B in shape C x V\r\n B = torch.cat((B, conf_feats.unsqueeze(0)), dim=-1)\r\n elif self.confounder_path: #### cls-agnostic\r\n device = B.device\r\n bag_q = self.confounder_W_q(B.squeeze(0)).unsqueeze(0) # bs x C x V --- C x V ----bs x C x Q\r\n conf_k = self.confounder_W_k(self.confounder_feat.view(-1, B.shape[-1])) # k x C x K ---- C*k x K\r\n conf_k = conf_k.view(self.confounder_feat.shape[0], self.confounder_feat.shape[1],\r\n bag_q.shape[-1]) # C*k x K ---k x C x Q\r\n A = torch.einsum('kcq, bcq -> kcb ', conf_k, bag_q)\r\n # A = torch.mm(conf_k, bag_q.transpose(0, 1))\r\n A = F.softmax(A / torch.sqrt(torch.tensor(conf_k.shape[-1], dtype=torch.float32, device=device)), 0) #\r\n # conf_feats = torch.mm(A.transpose(0, 1), self.confounder_feat) # compute bag representation, B in shape C x V\r\n conf_feats = torch.einsum(' kcb ,kcq-> bcq ', A, self.confounder_feat)\r\n B = torch.cat((B, conf_feats), dim=2)\r\n C = self.fcc(B) # 1 x C x 1\r\n C = C.view(1, -1)\r\n return C, A_out, B\r"
},
{
"identifier": "probabilistic_MIL_Bayes_spvis",
"path": "architecture/bmil.py",
"snippet": "class probabilistic_MIL_Bayes_spvis(nn.Module):\r\n def __init__(self, conf, size_arg=\"small\", top_k=1):\r\n super(probabilistic_MIL_Bayes_spvis, self).__init__()\r\n\r\n # self.size_dict = {\"small\": [1024, 512, 256], \"big\": [1024, 512, 384]}\r\n self.size_dict = {\"small\": [conf.feat_d, 512, 256], \"big\": [conf.feat_d, 512, 384]}\r\n size = self.size_dict[size_arg]\r\n\r\n ard_init = -4.\r\n self.linear1 = nn.Linear(size[0], size[1])\r\n self.linear2a = LinearVDO(size[1], size[2], ard_init=ard_init)\r\n self.linear2b = LinearVDO(size[1], size[2], ard_init=ard_init)\r\n self.linear3 = LinearVDO(size[2], 2, ard_init=ard_init)\r\n\r\n self.gaus_smoothing = GaussianSmoothing(1, 3, 0.5)\r\n\r\n self.classifiers = LinearVDO(size[1], conf.n_class, ard_init=-3.)\r\n\r\n self.dp_0 = nn.Dropout(0.25)\r\n self.dp_a = nn.Dropout(0.25)\r\n self.dp_b = nn.Dropout(0.25)\r\n\r\n self.prior_mu = torch.tensor([-5., 0.])\r\n self.prior_logvar = torch.tensor([-1., 3.])\r\n\r\n initialize_weights(self)\r\n self.top_k = top_k\r\n self.patch_size = conf.patch_size\r\n\r\n def reparameterize(self, mu, logvar):\r\n std = torch.exp(0.5 * logvar)\r\n eps = torch.randn_like(std)\r\n return mu + eps * std\r\n\r\n def kl_logistic_normal(self, mu_pr, mu_pos, logvar_pr, logvar_pos):\r\n return (logvar_pr - logvar_pos) / 2. + (logvar_pos ** 2 + (mu_pr - mu_pos) ** 2) / (2. * logvar_pr ** 2) - 0.5\r\n\r\n def relocate(self):\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n self.linear1 = self.linear1.to(device)\r\n self.linear2a = self.linear2a.to(device)\r\n self.linear2b = self.linear2b.to(device)\r\n self.linear3 = self.linear3.to(device)\r\n\r\n self.dp_0 = self.dp_0.to(device)\r\n self.dp_a = self.dp_a.to(device)\r\n self.dp_b = self.dp_b.to(device)\r\n self.gaus_smoothing = self.gaus_smoothing.to(device)\r\n\r\n self.prior_mu = self.prior_mu.to(device)\r\n self.prior_logvar = self.prior_logvar.to(device)\r\n\r\n self.classifiers = self.classifiers.to(device)\r\n\r\n def forward(self, h, coords, height, width, slide_label=None, validation=False):\r\n h = h[0]\r\n device = h.device\r\n h = F.relu(self.dp_0(self.linear1(h)))\r\n\r\n feat_a = self.dp_a(torch.sigmoid(self.linear2a(h)))\r\n feat_b = self.dp_b(torch.tanh(self.linear2b(h)))\r\n feat = feat_a.mul(feat_b)\r\n params = self.linear3(feat)\r\n\r\n coords = coords // self.patch_size\r\n asign = lambda coord: coord[:, 0] + coord[:, 1] * (width // self.patch_size)\r\n coords = asign(coords)\r\n coords = torch.from_numpy(coords).to(device)\r\n\r\n mu = torch.zeros([1, (height // self.patch_size + 1) * (width // self.patch_size + 1)]).to(device)\r\n logvar = torch.zeros([1, (height // self.patch_size + 1) * (width // self.patch_size + 1)]).to(device)\r\n\r\n mu[:, coords.long()] = params[:, 0]\r\n logvar[:, coords.long()] = params[:, 1]\r\n\r\n mu = mu.view(1, height // self.patch_size + 1, width // self.patch_size + 1)\r\n logvar = logvar.view(1, height // self.patch_size + 1, width // self.patch_size + 1)\r\n\r\n if not validation:\r\n mu_pr = self.prior_mu[slide_label.item()].expand_as(mu)\r\n logvar_pr = self.prior_logvar[slide_label.item()]\r\n kl_div = self.kl_logistic_normal(mu_pr, mu, logvar_pr, logvar)\r\n else:\r\n kl_div = None\r\n\r\n # # no branch\r\n mu = F.pad(mu, (1, 1, 1, 1), mode='constant', value=0)\r\n mu = torch.unsqueeze(mu, dim=0)\r\n mu = self.gaus_smoothing(mu)\r\n\r\n gaus_samples = self.reparameterize(mu, logvar)\r\n gaus_samples = torch.squeeze(gaus_samples, dim=0)\r\n\r\n A = 
F.sigmoid(gaus_samples)\r\n A = A.view(1, -1)\r\n\r\n patch_A = torch.index_select(A, dim=1, index=coords)\r\n M = torch.mm(patch_A, h) / patch_A.sum()\r\n\r\n logits = self.classifiers(M)\r\n\r\n y_probs = F.softmax(logits, dim=1)\r\n top_instance_idx = torch.topk(y_probs[:, 1], self.top_k, dim=0)[1].view(1, )\r\n top_instance = torch.index_select(logits, dim=0, index=top_instance_idx)\r\n Y_hat = torch.topk(top_instance, 1, dim=1)[1]\r\n Y_prob = F.softmax(top_instance, dim=1)\r\n\r\n if not validation:\r\n return top_instance, Y_prob, Y_hat, kl_div, y_probs, patch_A.view((1, -1))\r\n else:\r\n return top_instance, Y_prob, Y_hat, y_probs, patch_A.view((1, -1))\r"
},
{
"identifier": "CLAM_SB",
"path": "architecture/clam.py",
"snippet": "class CLAM_SB(nn.Module):\r\n def __init__(self, conf, gate=True, size_arg=\"small\", k_sample=8, dropout=True,\r\n instance_loss_fn=nn.CrossEntropyLoss()):\r\n super(CLAM_SB, self).__init__()\r\n n_classes = conf.n_class\r\n self.size_dict = {\"small\": [conf.D_feat, conf.D_inner, 128], \"big\": [conf.D_feat, 512, 384]}\r\n size = self.size_dict[size_arg]\r\n fc = [nn.Linear(size[0], size[1]), nn.ReLU()]\r\n if dropout:\r\n fc.append(nn.Dropout(0.25))\r\n if gate:\r\n attention_net = Attn_Net_Gated(L=size[1], D=size[2], dropout=dropout, n_classes=conf.n_token)\r\n else:\r\n attention_net = Attn_Net(L=size[1], D=size[2], dropout=dropout, n_classes=conf.n_token)\r\n fc.append(attention_net)\r\n self.attention_net = nn.Sequential(*fc)\r\n self.classifiers = nn.Linear(size[1], n_classes)\r\n instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)]\r\n self.instance_classifiers = nn.ModuleList(instance_classifiers)\r\n self.k_sample = k_sample\r\n self.instance_loss_fn = instance_loss_fn\r\n self.n_classes = n_classes\r\n self.subtyping = False\r\n if conf.n_class > 2:\r\n self.subtyping = True\r\n self.n_masked_patch = conf.n_masked_patch\r\n\r\n initialize_weights(self)\r\n\r\n def relocate(self):\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n self.attention_net = self.attention_net.to(device)\r\n self.classifiers = self.classifiers.to(device)\r\n self.instance_classifiers = self.instance_classifiers.to(device)\r\n\r\n @staticmethod\r\n def create_positive_targets(length, device):\r\n return torch.full((length,), 1, device=device).long()\r\n\r\n @staticmethod\r\n def create_negative_targets(length, device):\r\n return torch.full((length,), 0, device=device).long()\r\n\r\n # instance-level evaluation for in-the-class attention branch\r\n def inst_eval(self, A, h, classifier):\r\n device = h.device\r\n if len(A.shape) == 1:\r\n A = A.view(1, -1)\r\n top_p_ids = torch.topk(A, self.k_sample)[1][-1]\r\n top_p = torch.index_select(h, dim=0, index=top_p_ids)\r\n top_n_ids = torch.topk(-A, self.k_sample, dim=1)[1][-1]\r\n top_n = torch.index_select(h, dim=0, index=top_n_ids)\r\n p_targets = self.create_positive_targets(self.k_sample, device)\r\n n_targets = self.create_negative_targets(self.k_sample, device)\r\n\r\n all_targets = torch.cat([p_targets, n_targets], dim=0)\r\n all_instances = torch.cat([top_p, top_n], dim=0)\r\n logits = classifier(all_instances)\r\n all_preds = torch.topk(logits, 1, dim=1)[1].squeeze(1)\r\n instance_loss = self.instance_loss_fn(logits, all_targets)\r\n return instance_loss, all_preds, all_targets\r\n\r\n # instance-level evaluation for out-of-the-class attention branch\r\n def inst_eval_out(self, A, h, classifier):\r\n device = h.device\r\n if len(A.shape) == 1:\r\n A = A.view(1, -1)\r\n top_p_ids = torch.topk(A, self.k_sample)[1][-1]\r\n top_p = torch.index_select(h, dim=0, index=top_p_ids)\r\n p_targets = self.create_negative_targets(self.k_sample, device)\r\n logits = classifier(top_p)\r\n p_preds = torch.topk(logits, 1, dim=1)[1].squeeze(1)\r\n instance_loss = self.instance_loss_fn(logits, p_targets)\r\n return instance_loss, p_preds, p_targets\r\n\r\n def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False, is_train=True):\r\n A, h = self.attention_net(h[0]) # NxK\r\n A = torch.transpose(A, -1, -2) # KxN\r\n if attention_only:\r\n return A\r\n\r\n\r\n if self.n_masked_patch > 0 and is_train:\r\n # Get the indices of the top-k largest values\r\n b, q, c = 
A.shape\r\n n_masked_patch = min(self.n_masked_patch, c)\r\n _, indices = torch.topk(A, n_masked_patch, dim=-1)\r\n indices = indices.reshape(b * q, -1)\r\n rand_selected = torch.argsort(torch.rand(*indices.shape), dim=-1)[:,:int(n_masked_patch * 0.5)]\r\n masked_indices = indices[torch.arange(indices.shape[0]).unsqueeze(-1), rand_selected]\r\n random_mask = torch.ones(b*q, c).to(A.device)\r\n random_mask.scatter_(-1, masked_indices, 0)\r\n A = A.masked_fill(random_mask.reshape(b, q, -1) == 0, -1e9)\r\n\r\n\r\n A_raw = A\r\n A = F.softmax(A, dim=-1) # softmax over N\r\n\r\n if instance_eval:\r\n total_inst_loss = 0.0\r\n all_preds = []\r\n all_targets = []\r\n inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() # binarize label\r\n for i in range(len(self.instance_classifiers)):\r\n inst_label = inst_labels[i].item()\r\n classifier = self.instance_classifiers[i]\r\n if inst_label == 1: # in-the-class:\r\n instance_loss, preds, targets = self.inst_eval(A, h, classifier)\r\n all_preds.extend(preds.cpu().numpy())\r\n all_targets.extend(targets.cpu().numpy())\r\n else: # out-of-the-class\r\n if self.subtyping:\r\n instance_loss, preds, targets = self.inst_eval_out(A, h, classifier)\r\n all_preds.extend(preds.cpu().numpy())\r\n all_targets.extend(targets.cpu().numpy())\r\n else:\r\n continue\r\n total_inst_loss += instance_loss\r\n\r\n if self.subtyping:\r\n total_inst_loss /= len(self.instance_classifiers)\r\n\r\n M = torch.mm(A, h)\r\n logits = self.classifiers(M)\r\n if instance_eval:\r\n return logits, total_inst_loss\r\n else:\r\n return logits\r\n # Y_hat = torch.topk(logits, 1, dim=1)[1]\r\n # Y_prob = F.softmax(logits, dim=1)\r\n # if instance_eval:\r\n # results_dict = {'instance_loss': total_inst_loss, 'inst_labels': np.array(all_targets),\r\n # 'inst_preds': np.array(all_preds)}\r\n # else:\r\n # results_dict = {}\r\n # if return_features:\r\n # results_dict.update({'features': M})\r\n # return logits, Y_prob, Y_hat, A_raw, results_dict\r"
},
{
"identifier": "CLAM_MB",
"path": "architecture/clam.py",
"snippet": "class CLAM_MB(CLAM_SB):\r\n def __init__(self, conf, gate=True, size_arg=\"small\", k_sample=8, dropout=True,\r\n instance_loss_fn=nn.CrossEntropyLoss()):\r\n nn.Module.__init__(self)\r\n n_classes = conf.n_class\r\n self.size_dict = {\"small\": [conf.D_feat, conf.D_inner, 128], \"big\": [conf.D_feat, 512, 384]}\r\n size = self.size_dict[size_arg]\r\n fc = [nn.Linear(size[0], size[1]), nn.ReLU()]\r\n if dropout:\r\n fc.append(nn.Dropout(0.25))\r\n if gate:\r\n attention_net = Attn_Net_Gated(L=size[1], D=size[2], dropout=dropout, n_classes=n_classes)\r\n else:\r\n attention_net = Attn_Net(L=size[1], D=size[2], dropout=dropout, n_classes=n_classes)\r\n fc.append(attention_net)\r\n self.attention_net = nn.Sequential(*fc)\r\n bag_classifiers = [nn.Linear(size[1], 1) for i in\r\n range(n_classes)] # use an indepdent linear layer to predict each class\r\n self.classifiers = nn.ModuleList(bag_classifiers)\r\n instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)]\r\n self.instance_classifiers = nn.ModuleList(instance_classifiers)\r\n self.k_sample = k_sample\r\n self.instance_loss_fn = instance_loss_fn\r\n self.n_classes = n_classes\r\n self.subtyping = False\r\n if conf.n_class > 2:\r\n self.subtyping = True\r\n initialize_weights(self)\r\n\r\n def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False):\r\n device = h.device\r\n h = h[0]\r\n A, h = self.attention_net(h) # NxK\r\n A = torch.transpose(A, 1, 0) # KxN\r\n if attention_only:\r\n return A\r\n A_raw = A\r\n A = softmax_one(A, dim=1) # softmax over N\r\n\r\n if instance_eval:\r\n total_inst_loss = 0.0\r\n all_preds = []\r\n all_targets = []\r\n inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() # binarize label\r\n for i in range(len(self.instance_classifiers)):\r\n inst_label = inst_labels[i].item()\r\n classifier = self.instance_classifiers[i]\r\n if inst_label == 1: # in-the-class:\r\n instance_loss, preds, targets = self.inst_eval(A[i], h, classifier)\r\n all_preds.extend(preds.cpu().numpy())\r\n all_targets.extend(targets.cpu().numpy())\r\n else: # out-of-the-class\r\n if self.subtyping:\r\n instance_loss, preds, targets = self.inst_eval_out(A[i], h, classifier)\r\n all_preds.extend(preds.cpu().numpy())\r\n all_targets.extend(targets.cpu().numpy())\r\n else:\r\n continue\r\n total_inst_loss += instance_loss\r\n\r\n if self.subtyping:\r\n total_inst_loss /= len(self.instance_classifiers)\r\n\r\n M = torch.mm(A, h)\r\n logits = torch.empty(1, self.n_classes).float().to(device)\r\n for c in range(self.n_classes):\r\n logits[0, c] = self.classifiers[c](M[c])\r\n if instance_eval:\r\n return logits, total_inst_loss\r\n else:\r\n return logits\r"
},
{
"identifier": "mean_max",
"path": "modules/mean_max.py",
"snippet": "def initialize_weights(module):\n def __init__(self,conf,dropout=True,act='relu',test=False):\n def forward(self,x):\n def __init__(self,conf,dropout=True,act='relu',test=False):\n def forward(self,x):\nclass MeanMIL(nn.Module):\nclass MaxMIL(nn.Module):"
}
] | import sys
import os
import yaml
import argparse
import torch
from pprint import pprint
from torch import nn
from torch.utils.data import DataLoader
from utils.utils import save_model, Struct, set_seed, Wandb_Writer
from datasets.datasets import build_HDF5_feat_dataset
from architecture.transformer import TransformWrapper, AttnMIL
from architecture.transMIL import TransMIL
from engine import train_one_epoch, evaluate
from architecture.dsmil import MILNet, FCLayer, BClassifier
from architecture.bmil import probabilistic_MIL_Bayes_spvis
from architecture.clam import CLAM_SB, CLAM_MB
from modules import mean_max
| 10,080 |
# !/usr/bin/env python
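# disable HDF5 file locking so multiple readers (e.g. DataLoader workers) can open the same feature file without lock errors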
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_arguments():
parser = argparse.ArgumentParser('Patch classification training', add_help=False)
    parser.add_argument('--config', dest='config', default='config/camelyon_medical_ssl_config.yml',
                        help='path to the experiment settings in YAML format')
parser.add_argument(
"--eval-only", action="store_true", help="evaluation only"
)
parser.add_argument(
"--seed", type=int, default=1, help="set the random seed to ensure reproducibility"
)
    parser.add_argument('--wandb_mode', default='disabled', choices=['offline', 'online', 'disabled'],
                        help='the mode of wandb')
parser.add_argument(
"--n_shot", type=int, default=-1, help="number of wsi images"
)
    parser.add_argument(
        "--w_loss", type=float, default=1.0, help="weight of the loss term"
    )
    parser.add_argument(
        "--arch", type=str, default='transmil', choices=['transmil', 'clam_sb', 'clam_mb', 'attnmil',
                                                          'selfattn', 'dsmil', 'bmil_spvis', 'meanmil', 'maxmil'], help="MIL aggregation architecture"
    )
    parser.add_argument(
        "--n_token", type=int, default=1, help="number of query tokens"
    )
    parser.add_argument(
        "--n_masked_patch", type=int, default=0, help="number of top-attention patches considered for adversarial masking (0 disables it)"
    )
args = parser.parse_args()
return args
def main():
# Load config file
args = get_arguments()
# get config
with open(args.config, "r") as ymlfile:
c = yaml.load(ymlfile, Loader=yaml.FullLoader)
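        # command-line arguments take precedence over the values loaded from the YAML file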
c.update(vars(args))
conf = Struct(**c)
group_name = 'ds_%s_%s_arch_%s_%sepochs' % (conf.dataset, conf.pretrain, conf.arch, conf.train_epoch)
log_writer = Wandb_Writer(group_name=group_name, mode=args.wandb_mode, name=args.seed)
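    # wandb run directories typically end in ".../files"; strip that suffix and keep checkpoints under the run folder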
conf.ckpt_dir = log_writer.wandb.dir[:-5] + 'saved_models'
if conf.wandb_mode == 'disabled':
conf.ckpt_dir = os.path.join(conf.ckpt_dir, group_name, str(args.seed))
os.makedirs(conf.ckpt_dir, exist_ok=True)
print("Used config:");
pprint(vars(conf));
# Prepare dataset
set_seed(args.seed)
# define datasets and dataloaders
train_data, val_data, test_data = build_HDF5_feat_dataset(os.path.join(conf.data_dir, 'patch_feats_pretrain_%s.h5'%conf.pretrain), conf)
train_loader = DataLoader(train_data, batch_size=conf.B, shuffle=True,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=True)
val_loader = DataLoader(val_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
test_loader = DataLoader(test_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
# define network
if conf.arch == 'transmil':
net = TransMIL(conf)
elif conf.arch == 'selfattn':
net = TransformWrapper(conf)
elif conf.arch == 'clam_sb':
net = CLAM_SB(conf).to(device)
elif conf.arch == 'clam_mb':
net = CLAM_MB(conf).to(device)
elif conf.arch == 'dsmil':
|
# !/usr/bin/env python
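# disable HDF5 file locking so multiple readers (e.g. DataLoader workers) can open the same feature file without lock errors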
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_arguments():
parser = argparse.ArgumentParser('Patch classification training', add_help=False)
    parser.add_argument('--config', dest='config', default='config/camelyon_medical_ssl_config.yml',
                        help='path to the experiment settings in YAML format')
parser.add_argument(
"--eval-only", action="store_true", help="evaluation only"
)
parser.add_argument(
"--seed", type=int, default=1, help="set the random seed to ensure reproducibility"
)
    parser.add_argument('--wandb_mode', default='disabled', choices=['offline', 'online', 'disabled'],
                        help='the mode of wandb')
parser.add_argument(
"--n_shot", type=int, default=-1, help="number of wsi images"
)
    parser.add_argument(
        "--w_loss", type=float, default=1.0, help="weight of the loss term"
    )
    parser.add_argument(
        "--arch", type=str, default='transmil', choices=['transmil', 'clam_sb', 'clam_mb', 'attnmil',
                                                          'selfattn', 'dsmil', 'bmil_spvis', 'meanmil', 'maxmil'], help="MIL aggregation architecture"
    )
    parser.add_argument(
        "--n_token", type=int, default=1, help="number of query tokens"
    )
    parser.add_argument(
        "--n_masked_patch", type=int, default=0, help="number of top-attention patches considered for adversarial masking (0 disables it)"
    )
args = parser.parse_args()
return args
def main():
# Load config file
args = get_arguments()
# get config
with open(args.config, "r") as ymlfile:
c = yaml.load(ymlfile, Loader=yaml.FullLoader)
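        # command-line arguments take precedence over the values loaded from the YAML file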
c.update(vars(args))
conf = Struct(**c)
group_name = 'ds_%s_%s_arch_%s_%sepochs' % (conf.dataset, conf.pretrain, conf.arch, conf.train_epoch)
log_writer = Wandb_Writer(group_name=group_name, mode=args.wandb_mode, name=args.seed)
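    # wandb run directories typically end in ".../files"; strip that suffix and keep checkpoints under the run folder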
conf.ckpt_dir = log_writer.wandb.dir[:-5] + 'saved_models'
if conf.wandb_mode == 'disabled':
conf.ckpt_dir = os.path.join(conf.ckpt_dir, group_name, str(args.seed))
os.makedirs(conf.ckpt_dir, exist_ok=True)
print("Used config:");
pprint(vars(conf));
# Prepare dataset
set_seed(args.seed)
# define datasets and dataloaders
train_data, val_data, test_data = build_HDF5_feat_dataset(os.path.join(conf.data_dir, 'patch_feats_pretrain_%s.h5'%conf.pretrain), conf)
train_loader = DataLoader(train_data, batch_size=conf.B, shuffle=True,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=True)
val_loader = DataLoader(val_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
test_loader = DataLoader(test_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
# define network
if conf.arch == 'transmil':
net = TransMIL(conf)
elif conf.arch == 'selfattn':
net = TransformWrapper(conf)
elif conf.arch == 'clam_sb':
net = CLAM_SB(conf).to(device)
elif conf.arch == 'clam_mb':
net = CLAM_MB(conf).to(device)
elif conf.arch == 'dsmil':
| i_classifier = FCLayer(conf.D_feat, conf.n_class)
| 11 | 2023-11-12 14:07:34+00:00 | 12k |
zhang-tao-whu/DVIS_Plus | dvis_Plus/ctvis.py | [
{
"identifier": "VideoSetCriterion",
"path": "mask2former_video/modeling/criterion.py",
"snippet": "class VideoSetCriterion(nn.Module):\n \"\"\"This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n\n def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,\n num_points, oversample_ratio, importance_sample_ratio, frames=2):\n \"\"\"Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer(\"empty_weight\", empty_weight)\n\n # pointwise mask loss parameters\n self.num_points = num_points\n self.oversample_ratio = oversample_ratio\n self.importance_sample_ratio = importance_sample_ratio\n self.frames = frames\n\n def loss_labels(self, outputs, targets, indices, num_masks):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert \"pred_logits\" in outputs\n src_logits = outputs[\"pred_logits\"].float()\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(\n src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device\n )\n target_classes[idx] = target_classes_o.to(target_classes)\n\n loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)\n losses = {\"loss_ce\": loss_ce}\n return losses\n \n def loss_masks(self, outputs, targets, indices, num_masks):\n \"\"\"Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]\n \"\"\"\n assert \"pred_masks\" in outputs\n\n src_idx = self._get_src_permutation_idx(indices)\n src_masks = outputs[\"pred_masks\"]\n src_masks = src_masks[src_idx]\n # Modified to handle video\n target_masks = torch.cat([t['masks'][i] for t, (_, i) in zip(targets, indices)]).to(src_masks)\n\n # No need to upsample predictions as we are using normalized coordinates :)\n # NT x 1 x H x W\n src_masks = src_masks.flatten(0, 1)[:, None]\n target_masks = target_masks.flatten(0, 1)[:, None]\n\n with torch.no_grad():\n # sample point_coords\n point_coords = get_uncertain_point_coords_with_randomness(\n src_masks.to(torch.float32),\n lambda logits: calculate_uncertainty(logits),\n self.num_points,\n self.oversample_ratio,\n self.importance_sample_ratio,\n )\n # get gt labels\n point_labels = point_sample(\n target_masks,\n point_coords.to(target_masks),\n align_corners=False,\n ).squeeze(1)\n\n point_logits = point_sample(\n src_masks,\n point_coords.to(src_masks),\n align_corners=False,\n ).squeeze(1)\n\n losses = {\n \"loss_mask\": sigmoid_ce_loss_jit(point_logits, point_labels, num_masks),\n 
\"loss_dice\": dice_loss_jit(point_logits, point_labels, num_masks),\n }\n\n del src_masks\n del target_masks\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_masks):\n loss_map = {\n 'labels': self.loss_labels,\n 'masks': self.loss_masks,\n }\n assert loss in loss_map, f\"do you really want to compute {loss} loss?\"\n return loss_map[loss](outputs, targets, indices, num_masks)\n\n def forward(self, outputs, targets, matcher_outputs=None, ret_match_result=False):\n \"\"\"This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n if matcher_outputs is None:\n outputs_without_aux = {k: v for k, v in outputs.items() if k != \"aux_outputs\"}\n else:\n outputs_without_aux = {k: v for k, v in matcher_outputs.items() if k != \"aux_outputs\"}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n # [per image indicates], per image indicates -> (pred inds, gt inds)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_masks = sum(len(t[\"labels\"]) for t in targets)\n num_masks = torch.as_tensor(\n [num_masks], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_masks)\n num_masks = torch.clamp(num_masks / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_masks))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n if matcher_outputs is None:\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_masks)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n if ret_match_result:\n return losses, indices\n return losses\n\n def __repr__(self):\n head = \"Criterion \" + self.__class__.__name__\n body = [\n \"matcher: {}\".format(self.matcher.__repr__(_repr_indent=8)),\n \"losses: {}\".format(self.losses),\n \"weight_dict: {}\".format(self.weight_dict),\n \"num_classes: {}\".format(self.num_classes),\n \"eos_coef: {}\".format(self.eos_coef),\n \"num_points: {}\".format(self.num_points),\n \"oversample_ratio: {}\".format(self.oversample_ratio),\n \"importance_sample_ratio: {}\".format(self.importance_sample_ratio),\n ]\n _repr_indent = 4\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)"
},
{
"identifier": "VideoHungarianMatcher",
"path": "mask2former_video/modeling/matcher.py",
"snippet": "class VideoHungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1, num_points: int = 0):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost\n cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_mask = cost_mask\n self.cost_dice = cost_dice\n\n assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, \"all costs cant be 0\"\n\n self.num_points = num_points\n\n @torch.no_grad()\n def memory_efficient_forward(self, outputs, targets):\n \"\"\"More memory-friendly matching\"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n indices = []\n\n # Iterate through batch size\n for b in range(bs):\n\n out_prob = outputs[\"pred_logits\"][b].softmax(-1) # [num_queries, num_classes]\n tgt_ids = targets[b][\"labels\"].to(torch.int64)\n\n\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n try:\n cost_class = -out_prob[:, tgt_ids]\n except:\n cost_class = 0.0\n print(tgt_ids)\n\n out_mask = outputs[\"pred_masks\"][b] # [num_queries, T, H_pred, W_pred]\n # gt masks are already padded when preparing target\n tgt_mask = targets[b][\"masks\"].to(out_mask) # [num_gts, T, H_pred, W_pred]\n\n # out_mask = out_mask[:, None]\n # tgt_mask = tgt_mask[:, None]\n # all masks share the same set of points for efficient matching!\n point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)\n # get gt labels\n tgt_mask = point_sample(\n tgt_mask,\n point_coords.repeat(tgt_mask.shape[0], 1, 1).to(tgt_mask),\n align_corners=False,\n ).flatten(1)\n\n out_mask = point_sample(\n out_mask,\n point_coords.repeat(out_mask.shape[0], 1, 1).to(out_mask),\n align_corners=False,\n ).flatten(1)\n\n with autocast(enabled=False):\n out_mask = out_mask.float()\n tgt_mask = tgt_mask.float()\n # Compute the focal loss between masks\n cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)\n\n # Compute the dice loss betwen masks\n cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)\n # Final cost matrix\n C = (\n self.cost_mask * cost_mask\n + self.cost_class * cost_class\n + self.cost_dice * cost_dice\n )\n C = C.reshape(num_queries, -1).cpu()\n\n indices.append(linear_sum_assignment(C))\n\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n # [per image indicates], per image indicates -> (pred inds, gt inds)\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_masks\": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with 
the predicted masks\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"masks\": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n return self.memory_efficient_forward(outputs, targets)\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_mask: {}\".format(self.cost_mask),\n \"cost_dice: {}\".format(self.cost_dice),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)"
},
{
"identifier": "HungarianMatcher",
"path": "mask2former/modeling/matcher.py",
"snippet": "class HungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1, num_points: int = 0):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost\n cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_mask = cost_mask\n self.cost_dice = cost_dice\n\n assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, \"all costs cant be 0\"\n\n self.num_points = num_points\n\n @torch.no_grad()\n def memory_efficient_forward(self, outputs, targets):\n \"\"\"More memory-friendly matching\"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n indices = []\n\n # Iterate through batch size\n for b in range(bs):\n\n out_prob = outputs[\"pred_logits\"][b].softmax(-1) # [num_queries, num_classes]\n tgt_ids = targets[b][\"labels\"]\n\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n try:\n cost_class = -out_prob[:, tgt_ids]\n except:\n cost_class = 0.0\n out_mask = outputs[\"pred_masks\"][b] # [num_queries, H_pred, W_pred]\n # gt masks are already padded when preparing target\n tgt_mask = targets[b][\"masks\"].to(out_mask)\n\n out_mask = out_mask[:, None]\n tgt_mask = tgt_mask[:, None]\n # all masks share the same set of points for efficient matching!\n point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)\n # get gt labels\n tgt_mask = point_sample(\n tgt_mask,\n point_coords.repeat(tgt_mask.shape[0], 1, 1),\n align_corners=False,\n ).squeeze(1)\n\n out_mask = point_sample(\n out_mask,\n point_coords.repeat(out_mask.shape[0], 1, 1),\n align_corners=False,\n ).squeeze(1)\n\n with autocast(enabled=False):\n out_mask = out_mask.float()\n tgt_mask = tgt_mask.float()\n # Compute the focal loss between masks\n cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)\n\n # Compute the dice loss betwen masks\n cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)\n \n # Final cost matrix\n C = (\n self.cost_mask * cost_mask\n + self.cost_class * cost_class\n + self.cost_dice * cost_dice\n )\n C = C.reshape(num_queries, -1).cpu()\n\n indices.append(linear_sum_assignment(C))\n\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_masks\": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] 
(where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"masks\": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n return self.memory_efficient_forward(outputs, targets)\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_mask: {}\".format(self.cost_mask),\n \"cost_dice: {}\".format(self.cost_dice),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)"
},
{
"identifier": "retry_if_cuda_oom",
"path": "mask2former_video/utils/memory.py",
"snippet": "def retry_if_cuda_oom(func):\n \"\"\"\n Makes a function retry itself after encountering\n pytorch's CUDA OOM error.\n It will first retry after calling `torch.cuda.empty_cache()`.\n If that still fails, it will then retry by trying to convert inputs to CPUs.\n In this case, it expects the function to dispatch to CPU implementation.\n The return values may become CPU tensors as well and it's user's\n responsibility to convert it back to CUDA tensor if needed.\n Args:\n func: a stateless callable that takes tensor-like objects as arguments\n Returns:\n a callable which retries `func` if OOM is encountered.\n Examples:\n ::\n output = retry_if_cuda_oom(some_torch_function)(input1, input2)\n # output may be on CPU even if inputs are on GPU\n Note:\n 1. When converting inputs to CPU, it will only look at each argument and check\n if it has `.device` and `.to` for conversion. Nested structures of tensors\n are not supported.\n 2. Since the function might be called more than once, it has to be\n stateless.\n \"\"\"\n\n def maybe_to_cpu(x):\n try:\n like_gpu_tensor = x.device.type == \"cuda\" and hasattr(x, \"to\")\n except AttributeError:\n like_gpu_tensor = False\n if like_gpu_tensor:\n return x.to(device=\"cpu\").to(torch.float32)\n else:\n return x\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n with _ignore_torch_cuda_oom():\n return func(*args, **kwargs)\n\n # Clear cache and retry\n torch.cuda.empty_cache()\n with _ignore_torch_cuda_oom():\n return func(*args, **kwargs)\n\n # Try on CPU. This slows down the code significantly, therefore print a notice.\n logger = logging.getLogger(__name__)\n logger.info(\"Attempting to copy inputs to CPU due to CUDA OOM\")\n new_args = (maybe_to_cpu(x) for x in args)\n new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}\n with autocast(enabled=False):\n return func(*new_args, **new_kwargs)\n\n return wrapped"
}
] | import logging
import einops
import random
import numpy as np
import torch
import torch.nn.functional as F
import torch.distributed as dist
from typing import Tuple
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from mask2former_video.modeling.criterion import VideoSetCriterion
from mask2former_video.modeling.matcher import VideoHungarianMatcher
from mask2former.modeling.matcher import HungarianMatcher
from mask2former_video.utils.memory import retry_if_cuda_oom
from scipy.optimize import linear_sum_assignment
from torch import nn
from detectron2.config import configurable
from detectron2.structures import BitMasks
from detectron2.utils.registry import Registry | 7,514 | losses = ["labels", "masks"]
criterion = VideoSetCriterion(
sem_seg_head.num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=no_object_weight,
losses=losses,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,
importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
)
return {
"backbone": backbone,
"sem_seg_head": sem_seg_head,
"criterion": criterion,
"num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
"object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
"overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
"metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
"size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
"sem_seg_postprocess_before_inference": True,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
# video
"num_frames": cfg.INPUT.SAMPLING_FRAME_NUM,
"window_inference": cfg.MODEL.MASK_FORMER.TEST.WINDOW_INFERENCE,
# ctvis
"image_matcher": image_matcher,
"cl_plugin": cl_plugin,
}
@property
def device(self):
return self.pixel_mean.device
def prepare_for_cl_plugin(self, outputs, targets):
del outputs['aux_outputs'], outputs['pred_embds'], outputs['pred_embds_without_norm'], outputs['mask_features']
for item in targets:
item["masks"] = item["masks"].squeeze(1)
item["ids"] = item["ids"].squeeze(1)
outputs['pred_masks'] = outputs['pred_masks'].squeeze(2)
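        # flatten the batch and time axes so the contrastive-learning plugin sees per-frame query embeddings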
outputs['pred_reid_embed'] = einops.rearrange(outputs['pred_reid_embed'], 'b c t q -> (b t) q c')
return outputs, targets
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": per-region ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict has the results for one image. The dict contains the following keys:
* "sem_seg":
A Tensor that represents the
                    per-pixel segmentation predicted by the head.
The prediction has shape KxHxW that represents the logits of
each class for each pixel.
* "panoptic_seg":
                    A tuple that represents the panoptic output
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
images = []
for video in batched_inputs:
for frame in video["image"]:
images.append(frame.to(self.device))
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
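        # at inference time, optionally run the model window-by-window over the frames instead of on the whole clip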
if not self.training and self.window_inference:
outputs = self.run_window_inference(images.tensor, window_size=3)
else:
features = self.backbone(images.tensor)
outputs = self.sem_seg_head(features)
if self.training:
# mask classification target
targets = self.prepare_targets(batched_inputs, images)
outputs, targets = self.frame_decoder_loss_reshape(outputs, targets)
# bipartite matching-based loss
losses = self.criterion(outputs, targets)
for k in list(losses.keys()):
if k in self.criterion.weight_dict:
losses[k] *= self.criterion.weight_dict[k]
else:
# remove this loss if not specified in `weight_dict`
losses.pop(k)
# for cl loss
det_outputs, gt_instances = self.prepare_for_cl_plugin(outputs, targets)
losses.update(self.cl_plugin.train_loss(
det_outputs, gt_instances, self.image_matcher))
return losses
else:
outputs = self.post_processing(outputs)
mask_cls_results = outputs["pred_logits"]
mask_pred_results = outputs["pred_masks"]
mask_cls_result = mask_cls_results[0]
mask_pred_result = mask_pred_results[0]
first_resize_size = (images.tensor.shape[-2], images.tensor.shape[-1])
input_per_image = batched_inputs[0]
image_size = images.image_sizes[0] # image size without padding after data augmentation
height = input_per_image.get("height", image_size[0]) # raw image size before data augmentation
width = input_per_image.get("width", image_size[1])
|
logger = logging.getLogger(__name__)
@META_ARCH_REGISTRY.register()
class CTMinVIS(nn.Module):
"""
Copied from "https://github.com/NVlabs/MinVIS".
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
sem_seg_head: nn.Module,
criterion: nn.Module,
num_queries: int,
object_mask_threshold: float,
overlap_threshold: float,
metadata,
size_divisibility: int,
sem_seg_postprocess_before_inference: bool,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
# video
num_frames,
window_inference,
# ctvis
image_matcher,
cl_plugin,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
sem_seg_head: a module that predicts semantic segmentation from backbone features
criterion: a module that defines the loss
num_queries: int, number of queries
object_mask_threshold: float, threshold to filter query based on classification score
for panoptic segmentation inference
overlap_threshold: overlap threshold used in general inference for panoptic segmentation
metadata: dataset meta, get `thing` and `stuff` category names for panoptic
segmentation inference
size_divisibility: Some backbones require the input height and width to be divisible by a
specific integer. We can use this to override such requirement.
sem_seg_postprocess_before_inference: whether to resize the prediction back
to original input size before semantic segmentation inference or after.
For high-resolution dataset like Mapillary, resizing predictions before
inference will cause OOM error.
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
semantic_on: bool, whether to output semantic segmentation prediction
instance_on: bool, whether to output instance segmentation prediction
panoptic_on: bool, whether to output panoptic segmentation prediction
test_topk_per_image: int, instance segmentation parameter, keep topk instances per image
"""
super().__init__()
self.backbone = backbone
self.sem_seg_head = sem_seg_head
self.criterion = criterion
self.num_queries = num_queries
self.overlap_threshold = overlap_threshold
self.object_mask_threshold = object_mask_threshold
self.metadata = metadata
if size_divisibility < 0:
# use backbone size_divisibility if not set
size_divisibility = self.backbone.size_divisibility
self.size_divisibility = size_divisibility
self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
self.num_frames = num_frames
self.window_inference = window_inference
self.image_matcher = image_matcher
self.cl_plugin = cl_plugin
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
# Loss parameters:
deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
# loss weights
class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT
dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT
# building criterion
matcher = VideoHungarianMatcher(
cost_class=class_weight,
cost_mask=mask_weight,
cost_dice=dice_weight,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
)
# for cl loss
image_matcher = HungarianMatcher(
cost_class=class_weight,
cost_mask=mask_weight,
cost_dice=dice_weight,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
)
cl_plugin = build_cl_plugin(cfg) # train
weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight}
if deep_supervision:
dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS
aux_weight_dict = {}
for i in range(dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "masks"]
criterion = VideoSetCriterion(
sem_seg_head.num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=no_object_weight,
losses=losses,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,
importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
)
return {
"backbone": backbone,
"sem_seg_head": sem_seg_head,
"criterion": criterion,
"num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
"object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
"overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
"metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
"size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
"sem_seg_postprocess_before_inference": True,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
# video
"num_frames": cfg.INPUT.SAMPLING_FRAME_NUM,
"window_inference": cfg.MODEL.MASK_FORMER.TEST.WINDOW_INFERENCE,
# ctvis
"image_matcher": image_matcher,
"cl_plugin": cl_plugin,
}
@property
def device(self):
return self.pixel_mean.device
def prepare_for_cl_plugin(self, outputs, targets):
del outputs['aux_outputs'], outputs['pred_embds'], outputs['pred_embds_without_norm'], outputs['mask_features']
for item in targets:
item["masks"] = item["masks"].squeeze(1)
item["ids"] = item["ids"].squeeze(1)
outputs['pred_masks'] = outputs['pred_masks'].squeeze(2)
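        # flatten the batch and time axes so the contrastive-learning plugin sees per-frame query embeddings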
outputs['pred_reid_embed'] = einops.rearrange(outputs['pred_reid_embed'], 'b c t q -> (b t) q c')
return outputs, targets
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": per-region ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict has the results for one image. The dict contains the following keys:
* "sem_seg":
A Tensor that represents the
                    per-pixel segmentation predicted by the head.
The prediction has shape KxHxW that represents the logits of
each class for each pixel.
* "panoptic_seg":
                    A tuple that represents the panoptic output
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
images = []
for video in batched_inputs:
for frame in video["image"]:
images.append(frame.to(self.device))
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
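        # at inference time, optionally run the model window-by-window over the frames instead of on the whole clip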
if not self.training and self.window_inference:
outputs = self.run_window_inference(images.tensor, window_size=3)
else:
features = self.backbone(images.tensor)
outputs = self.sem_seg_head(features)
if self.training:
# mask classification target
targets = self.prepare_targets(batched_inputs, images)
outputs, targets = self.frame_decoder_loss_reshape(outputs, targets)
# bipartite matching-based loss
losses = self.criterion(outputs, targets)
for k in list(losses.keys()):
if k in self.criterion.weight_dict:
losses[k] *= self.criterion.weight_dict[k]
else:
# remove this loss if not specified in `weight_dict`
losses.pop(k)
# for cl loss
det_outputs, gt_instances = self.prepare_for_cl_plugin(outputs, targets)
losses.update(self.cl_plugin.train_loss(
det_outputs, gt_instances, self.image_matcher))
return losses
else:
outputs = self.post_processing(outputs)
mask_cls_results = outputs["pred_logits"]
mask_pred_results = outputs["pred_masks"]
mask_cls_result = mask_cls_results[0]
mask_pred_result = mask_pred_results[0]
first_resize_size = (images.tensor.shape[-2], images.tensor.shape[-1])
input_per_image = batched_inputs[0]
image_size = images.image_sizes[0] # image size without padding after data augmentation
height = input_per_image.get("height", image_size[0]) # raw image size before data augmentation
width = input_per_image.get("width", image_size[1])
| return retry_if_cuda_oom(self.inference_video)( | 3 | 2023-11-14 10:55:11+00:00 | 12k |
ej0cl6/TextEE | TextEE/models/DyGIEpp/data.py | [
{
"identifier": "Graph",
"path": "TextEE/models/DyGIEpp/graph.py",
"snippet": "class Graph(object):\n def __init__(self, entities, triggers, relations, roles, vocabs, gold=True):\n \"\"\"\n :param entities (list): A list of entities represented as a tuple of\n (start_offset, end_offset, label_idx). end_offset = the index of the end\n token + 1.\n :param triggers (list): A list of triggers represented as a tuple of\n (start_offset, end_offset, label_idx). end_offset = the index of the end\n token + 1.\n :param relations (list): A list of relations represented as a tuple of\n ((start_offset,end_offset,ent_type), (start_offset,end_offset,ent_type), label_idx). \n :param roles: A list of roles represented as a tuple of \n ((start_offset,end_offset,trigger_type), (start_offset,end_offset,ent_type), label_idx).\n :param vocabs (dict): Label type vocabularies.\n :param gold (bool): A marker that mark the graph is a gold annotation or not.\n \"\"\"\n self.entities = [(s,e,l,gold) for (s,e,l) in entities] if gold is not None else entities\n # we write into this form because of \"copy\" function\n self.triggers = [(s,e,l,gold) for (s,e,l) in triggers] if gold is not None else triggers\n self.relations = [(e1,e2,l,gold) for (e1,e2,l) in relations] if gold is not None else relations\n self.roles = [(t1,e2,l,gold) for (t1,e2,l) in roles] if gold is not None else roles\n \n self.vocabs = vocabs\n self.entity_type_itos = vocabs.get('entity_type_itos', None)\n self.event_type_itos = vocabs.get('event_type_itos', None)\n self.relation_type_itos = vocabs.get('relation_type_itos', None)\n self.role_type_itos = vocabs.get('role_type_itos', None)\n\n self.entity_num = len(entities)\n self.trigger_num = len(triggers)\n self.relation_num = len(relations)\n self.role_num = len(roles)\n\n # subscores\n self.entity_scores = [0]*self.entity_num\n self.trigger_scores = [0]*self.trigger_num\n self.relation_scores = [0]*self.relation_num\n self.role_scores = [0]*self.role_num\n\n # node span embedding\n self.entity_emb = [None]*self.entity_num\n self.trigger_emb = [None]*self.trigger_num\n\n # span map -- this is for checking overlapping\n self.entity_map = {}\n self.trigger_map = {}\n self.relation_map = {}\n self.role_map = {}\n for idx,ent in enumerate(self.entities):\n if (ent[0], ent[1]) in self.entity_map.keys():\n print('entity span duplication in initialization')\n else:\n self.entity_map[(ent[0], ent[1])] = idx\n for idx,tri in enumerate(self.triggers):\n if (tri[0], tri[1]) in self.trigger_map.keys():\n #raise ValueError('trigger span duplication in initialization')\n print('trigger span duplication in initialization')\n # ipdb.set_trace()\n else:\n self.trigger_map[(tri[0], tri[1])] = idx\n \n for idx,rel in enumerate(self.relations):\n if (rel[0][0], rel[0][1], rel[1][0], rel[1][1]) in self.relation_map.keys():\n raise ValueError('relation span duplication in initialization')\n else:\n self.relation_map[(rel[0][0], rel[0][1], rel[1][0], rel[1][1])] = idx\n \n for idx,role in enumerate(self.roles):\n if (role[0][0], role[0][1], role[1][0], role[1][1]) in self.role_map.keys():\n #raise ValueError('role span duplication in initialization')\n print('role span duplication in initialization')\n #pass\n else:\n self.role_map[(role[0][0], role[0][1], role[1][0], role[1][1])] = idx\n\n def __eq__(self, other):\n # TODO (I-Hung) Haven't decide whether Gold marker should also be consider\n if isinstance(other, Graph):\n equal = (self.entities == other.entities and\n self.triggers == other.triggers and\n self.relations == other.relations and\n self.roles == other.roles )\n return 
equal\n return False\n\n def to_dict(self):\n \"\"\"Convert a graph to a dict object\n :return (dict): A dictionary representing the graph, where label indices\n have been replaced with label strings.\n \"\"\"\n entities = [[i, j, self.entity_type_itos[k]] for i, j, k, _ in self.entities]\n triggers = [[i, j, self.event_type_itos[k]] for i, j, k, _ in self.triggers]\n relations = []\n for rel in self.relations:\n arg1_span = (rel[0][0], rel[0][1])\n arg1 = self.entity_map[arg1_span]\n arg2_span = (rel[1][0], rel[1][1])\n arg2 = self.entity_map[arg2_span]\n relations.append([arg1, arg2, self.relation_type_itos[rel[2]]])\n roles = []\n for role in self.roles:\n tri_span = (role[0][0], role[0][1])\n tri = self.trigger_map[tri_span]\n arg_span = (role[1][0], role[1][1])\n arg = self.entity_map[arg_span]\n roles.append([tri, arg, self.role_type_itos[role[2]]])\n return {\n 'entities': entities,\n 'triggers': triggers,\n 'relations': relations,\n 'roles': roles,\n }\n def clean_relation(self):\n self.relations = []\n self.relation_num = 0\n self.relation_scores = []\n self.relation_map = {}\n\n def clean_trigger(self):\n self.triggers = []\n self.trigger_num = 0\n self.trigger_scores = []\n self.trigger_emb = []\n self.trigger_map = {}\n\n def clean_entity(self):\n self.entities = []\n self.entity_num = 0\n self.entity_scores = []\n self.entity_emb = []\n self.entity_map = {}\n\n def clean_role(self):\n self.roles = []\n self.role_num = 0\n self.role_scores = []\n self.role_map = {}\n\n def copy(self):\n \"\"\"Make a copy of the graph\n :return (Graph): a copy of the current graph.\n \"\"\"\n graph = Graph(\n entities=self.entities.copy(),\n triggers=self.triggers.copy(),\n relations=self.relations.copy(),\n roles=self.roles.copy(),\n vocabs=self.vocabs,\n gold=None\n )\n graph.entity_scores = self.entity_scores.copy()\n graph.trigger_scores = self.trigger_scores.copy()\n graph.trigger_emb = self.trigger_emb.copy()\n graph.entity_emb = self.entity_emb.copy()\n graph.relation_scores = self.relation_scores.copy()\n graph.role_scores = self.role_scores.copy()\n return graph\n\n def clean(self, clean_emb=True, relation_directional=True, symmetric_relations=None):\n '''\n This function is used for cleaning entities/triggers/relations/roles\n that have labels 0 (We assume label 0 is 'O')\n '''\n assert symmetric_relations is None\n # clean entities\n remove_entity_idx = []\n for idx, (s,e,k,_) in enumerate(self.entities):\n if k == 0:\n remove_entity_idx.append(idx)\n del_list_inplace(self.entities, remove_entity_idx)\n del_list_inplace(self.entity_scores, remove_entity_idx)\n if clean_emb:\n self.entity_emb = [None] * len(self.entities)\n self.entity_map = {}\n for idx,ent in enumerate(self.entities):\n if (ent[0], ent[1]) in self.entity_map.keys():\n print('entity span duplication in clean')\n else:\n self.entity_map[(ent[0], ent[1])] = idx\n\n # clean triggers\n remove_trigger_idx = []\n for idx, (s,e,k,_) in enumerate(self.triggers):\n if k == 0:\n remove_trigger_idx.append(idx)\n del_list_inplace(self.triggers, remove_trigger_idx)\n del_list_inplace(self.trigger_scores, remove_trigger_idx)\n if clean_emb:\n self.trigger_emb = [None] * len(self.triggers)\n self.trigger_map = {}\n for idx,tri in enumerate(self.triggers):\n if (tri[0], tri[1]) in self.trigger_map.keys():\n # raise ValueError('trigger span duplication in clean')\n pass\n else:\n self.trigger_map[(tri[0], tri[1])] = idx\n\n # clean relations\n relations = [[i, j, k, l] for (i, j, k, l) in self.relations]\n remove_relation_idx = []\n 
for idx, (e1, e2, k, _) in enumerate(relations):\n if (e1[0], e1[1]) not in self.entity_map.keys():\n remove_relation_idx.append(idx)\n continue\n if (e2[0], e2[1]) not in self.entity_map.keys():\n remove_relation_idx.append(idx)\n continue\n if k == 0:\n remove_relation_idx.append(idx)\n continue\n del_list_inplace(relations, remove_relation_idx)\n del_list_inplace(self.relation_scores, remove_relation_idx)\n self.relations = [tuple(r) for r in relations]\n relations = [(i, j, k, g, l) for (i, j, k, g), l in zip(self.relations, self.relation_scores)]\n if not relation_directional:\n # rebuild relation map\n self.relation_map = {}\n for idx,rel in enumerate(relations):\n if (rel[0][0], rel[0][1], rel[1][0], rel[1][1]) in self.relation_map.keys():\n raise ValueError('relation span duplication in clean')\n else:\n self.relation_map[(rel[0][0], rel[0][1], rel[1][0], rel[1][1])] = idx\n\n relations_tmp = []\n for i, j, k, g, l in relations:\n if (j[0], j[1], i[0], i[1]) in self.relation_map.keys():\n if i[0] <= j[0]: # follow the smaller one's prediction\n relations_tmp.append((i, j, k, g, l))\n relations_tmp.append((j, i, k, g, l))\n else:\n relations_tmp.append((i, j, k, g, l))\n relations_tmp.append((j, i, k, g, l))\n relations = relations_tmp\n self.relations = [(i, j, k, g) for i, j, k, g,_ in relations]\n self.relation_scores = [l for _, _, _, _, l in relations]\n # rebuild relation map\n self.relation_map = {}\n for idx,rel in enumerate(self.relations):\n if (rel[0][0], rel[0][1], rel[1][0], rel[1][1]) in self.relation_map.keys():\n raise ValueError('relation span duplication in clean')\n else:\n self.relation_map[(rel[0][0], rel[0][1], rel[1][0], rel[1][1])] = idx\n \n # clean roles\n roles = [[i, j, k, g] for (i, j, k, g) in self.roles]\n remove_role_idx = []\n for idx, (t, e, k, g) in enumerate(roles):\n if (t[0], t[1]) not in self.trigger_map.keys():\n remove_role_idx.append(idx)\n continue\n if (e[0], e[1]) not in self.entity_map.keys():\n remove_role_idx.append(idx)\n continue\n if k == 0:\n remove_role_idx.append(idx)\n continue\n del_list_inplace(roles, remove_role_idx)\n del_list_inplace(self.role_scores, remove_role_idx) \n self.roles = [tuple(r) for r in roles]\n # rebuild role map\n self.role_map = {}\n for idx,role in enumerate(self.roles):\n if (role[0][0], role[0][1], role[1][0], role[1][1]) in self.role_map.keys():\n # raise ValueError('role span duplication in clean')\n pass\n else:\n self.role_map[(role[0][0], role[0][1], role[1][0], role[1][1])] = idx\n\n self.entity_num = len(self.entities)\n self.trigger_num = len(self.triggers)\n self.relation_num = len(self.relations)\n self.role_num = len(self.roles)\n \n def add_entity(self, start, end, label, emb=None, score_norm=0, gold=False):\n \"\"\"Add an entity mention to the graph.\n :param start (int): Start token offset of the entity mention.\n :param end (int): End token offset of the entity mention + 1.\n :param label (int): Index of the entity type label.\n \"\"\"\n # check whether this entity is duplicate\n if (start, end) not in self.entity_map.keys():\n self.entity_map[(start, end)]=self.entity_num\n self.entities.append((start, end, label, gold))\n self.entity_num = len(self.entities)\n self.entity_scores.append(score_norm)\n self.entity_emb.append(emb)\n return True\n else:\n #print('Duplicate entity for span ({}, {})'.format(start, end))\n return False\n\n def add_trigger(self, start, end, label, emb=None, score_norm=0, gold=False):\n \"\"\"Add an event trigger to the graph.\n :param start (int): Start token 
offset of the trigger.\n :param end (int): End token offset of the trigger + 1.\n :param label (int): Index of the event type label.\n :param score (float): Label score.\n :param gold (bool): Marker that mark this trigger is gold or not\n \"\"\"\n # check whether this trigger is duplicate\n if (start, end) not in self.trigger_map.keys():\n self.trigger_map[(start, end)]=self.trigger_num\n self.triggers.append((start, end, label, gold))\n self.trigger_num = len(self.triggers)\n self.trigger_scores.append(score_norm)\n self.trigger_emb.append(emb)\n return True\n else:\n #print('Duplicate trigger for span ({}, {})'.format(start, end))\n return False\n\n def add_relation(self, ent_1, ent_2, label, score_norm=0, gold=False):\n \"\"\"Add a relation edge to the graph.\n :param ent_1 (tuple(int, int, str)): start& end of the entity node 1.\n :param ent_2 (tuple(int, int, str)): start& end of the entity node 2.\n :param label (int): Index of the relation type label.\n :param score (float): Label score.\n :param gold (bool): Marker that mark this relation is gold or not\n \"\"\"\n assert ((ent_1[0], ent_1[1]) in self.entity_map.keys())\n assert ((ent_2[0], ent_2[1]) in self.entity_map.keys())\n if (ent_1[0], ent_1[1], ent_2[0], ent_2[1]) not in self.relation_map.keys():\n self.relation_map[(ent_1[0], ent_1[1], ent_2[0], ent_2[1])]=self.relation_num\n self.relations.append((ent_1, ent_2, label, gold))\n self.relation_num = len(self.relations)\n self.relation_scores.append(score_norm)\n return True\n else:\n return False\n\n def add_role(self, tri, ent, label, score_norm=0, gold=False):\n \"\"\"Add an event-argument link edge to the graph.\n :param tri (tuple(int, int, str)): start& end of the trigger node.\n :param ent (tuple(int, int, str)): start& end of the entity node.\n :param label (int): Index of the role label.\n :param score (float): Label score.\n :param gold (bool): Marker that mark this role is gold or not\n \"\"\"\n assert ((ent[0], ent[1]) in self.entity_map.keys())\n assert ((tri[0], tri[1]) in self.trigger_map.keys())\n if (tri[0], tri[1], ent[0], ent[1]) not in self.role_map.keys():\n self.role_map[(tri[0], tri[1], ent[0], ent[1])]=self.role_num\n self.roles.append((tri, ent, label, gold))\n self.role_num = len(self.roles)\n self.role_scores.append(score_norm)\n return True\n else:\n return False\n\n @staticmethod\n def empty_graph(vocabs):\n \"\"\"Create a graph without any node and edge.\n :param vocabs (dict): Vocabulary object.\n \"\"\"\n return Graph([], [], [], [], vocabs)\n\n\n def clean_non_gold(self, relation_directional=True, symmetric_relations=None):\n '''\n This function is used for cleaning entities/triggers/relations/roles\n that have gold label==False\n '''\n assert symmetric_relations is None\n # clean entities\n remove_entity_idx = []\n for idx, (s,e,_,gold) in enumerate(self.entities):\n if not gold:\n remove_entity_idx.append(idx)\n del_list_inplace(self.entities, remove_entity_idx)\n del_list_inplace(self.entity_scores, remove_entity_idx)\n del_list_inplace(self.entity_emb, remove_entity_idx)\n self.entity_map = {}\n for idx,ent in enumerate(self.entities):\n if (ent[0], ent[1]) in self.entity_map.keys():\n print('entity span duplication in clean')\n else:\n self.entity_map[(ent[0], ent[1])] = idx\n\n # clean triggers\n remove_trigger_idx = []\n for idx, (s,e,_,gold) in enumerate(self.triggers):\n if not gold:\n remove_trigger_idx.append(idx)\n del_list_inplace(self.triggers, remove_trigger_idx)\n del_list_inplace(self.trigger_scores, remove_trigger_idx)\n 
del_list_inplace(self.trigger_emb, remove_trigger_idx)\n self.trigger_map = {}\n for idx,tri in enumerate(self.triggers):\n if (tri[0], tri[1]) in self.trigger_map.keys():\n raise ValueError('trigger span duplication in clean')\n else:\n self.trigger_map[(tri[0], tri[1])] = idx\n \n # clean relations\n relations = [[i, j, k, l] for (i, j, k, l) in self.relations]\n remove_relation_idx = []\n for idx, (e1, e2, _, gold) in enumerate(relations):\n if (e1[0], e1[1]) not in self.entity_map.keys():\n remove_relation_idx.append(idx)\n continue\n if (e2[0], e2[1]) not in self.entity_map.keys():\n remove_relation_idx.append(idx)\n continue\n if not gold:\n remove_relation_idx.append(idx)\n continue\n del_list_inplace(relations, remove_relation_idx)\n del_list_inplace(self.relation_scores, remove_relation_idx)\n self.relations = [tuple(r) for r in relations]\n relations = [(i, j, k, g, l) for (i, j, k, g), l in zip(self.relations, self.relation_scores)]\n if not relation_directional:\n # rebuild relation map\n self.relation_map = {}\n for idx,rel in enumerate(relations):\n if (rel[0][0], rel[0][1], rel[1][0], rel[1][1]) in self.relation_map.keys():\n raise ValueError('relation span duplication in clean')\n else:\n self.relation_map[(rel[0][0], rel[0][1], rel[1][0], rel[1][1])] = idx\n\n relations_tmp = []\n for i, j, k, g, l in relations:\n if (j[0], j[1], i[0], i[1]) in self.relation_map.keys():\n if i[0] <= j[0]: # follow the smaller one's prediction\n relations_tmp.append((i, j, k, g, l))\n relations_tmp.append((j, i, k, g, l))\n else:\n relations_tmp.append((i, j, k, g, l))\n relations_tmp.append((j, i, k, g, l))\n relations = relations_tmp\n self.relations = [(i, j, k, g) for i, j, k, g,_ in relations]\n self.relation_scores = [l for _, _, _, _, l in relations]\n # rebuild relation map\n self.relation_map = {}\n for idx,rel in enumerate(self.relations):\n if (rel[0][0], rel[0][1], rel[1][0], rel[1][1]) in self.relation_map.keys():\n raise ValueError('relation span duplication in initialization')\n else:\n self.relation_map[(rel[0][0], rel[0][1], rel[1][0], rel[1][1])] = idx\n \n # clean roles\n roles = [[i, j, k, g] for (i, j, k, g) in self.roles]\n remove_role_idx = []\n for idx, (t, e, _, gold) in enumerate(roles):\n if (t[0], t[1]) not in self.trigger_map.keys():\n remove_role_idx.append(idx)\n continue\n if (e[0], e[1]) not in self.entity_map.keys():\n remove_role_idx.append(idx)\n continue\n if not gold:\n remove_role_idx.append(idx)\n continue\n del_list_inplace(roles, remove_role_idx)\n del_list_inplace(self.role_scores, remove_role_idx) \n self.roles = [tuple(r) for r in roles]\n # rebuild role map\n self.role_map = {}\n for idx,role in enumerate(self.roles):\n if (role[0][0], role[0][1], role[1][0], role[1][1]) in self.role_map.keys():\n raise ValueError('role span duplication in initialization')\n else:\n self.role_map[(role[0][0], role[0][1], role[1][0], role[1][1])] = idx\n self.entity_num = len(self.entities)\n self.trigger_num = len(self.triggers)\n self.relation_num = len(self.relations)\n self.role_num = len(self.roles)"
},
{
"identifier": "enumerate_spans",
"path": "TextEE/models/DyGIEpp/util.py",
"snippet": "def enumerate_spans(\n sentence,\n offset= 0,\n max_span_width= None,\n min_span_width= 1,\n filter_function= None,\n):\n \"\"\"\n Given a sentence, return all token spans within the sentence. Spans are `exclusive`.\n Additionally, you can provide a maximum and minimum span width, which will be used\n to exclude spans outside of this range.\n Finally, you can provide a function mapping `List[T] -> bool`, which will\n be applied to every span to decide whether that span should be included. This\n allows filtering by length, regex matches, pos tags or any Spacy `Token`\n attributes, for example. TODO (I-Hung): the filter function is not yet supported\n\n # Parameters\n sentence : `List[T]`, required.\n The sentence to generate spans for. The type is generic, as this function\n can be used with strings, or Spacy `Tokens` or other sequences. In our usage,\n our input is a list of strings.\n offset : `int`, optional (default = `0`)\n A numeric offset to add to all span start and end indices. This is helpful\n if the sentence is part of a larger structure, such as a document, which\n the indices need to respect.\n max_span_width : `int`, optional (default = `None`)\n The maximum length of spans which should be included. Defaults to len(sentence).\n min_span_width : `int`, optional (default = `1`)\n The minimum length of spans which should be included. Defaults to 1.\n filter_function : `Callable[[List[T]], bool]`, optional (default = `None`)\n A function mapping sequences of the passed type T to a boolean value.\n If `True`, the span is included in the returned spans from the\n sentence, otherwise it is excluded..\n \"\"\"\n max_span_width = max_span_width or len(sentence)\n assert (max_span_width - min_span_width) >= 0\n filter_function = filter_function or (lambda x: True)\n spans = []\n\n for start_index in range(len(sentence)):\n last_end_index = min(start_index + max_span_width, len(sentence))\n first_end_index = min(start_index + min_span_width - 1, len(sentence))\n for end_index in range(first_end_index, last_end_index):\n start = offset + start_index\n end = offset + end_index + 1\n # TODO (I-Hung): need to add this filter function\n #if filter_function(sentence[slice(start_index, end_index)]):\n # spans.append([start, end, 0])\n spans.append([start, end, 0]) # default label is 0\n return spans"
},
{
"identifier": "graph_add_fake_entity",
"path": "TextEE/models/DyGIEpp/util.py",
"snippet": "def graph_add_fake_entity(entities, graph, vocabs, num=None):\n idxs = np.arange(len(entities))\n np.random.shuffle(idxs)\n n_add = 0\n # add fake entity spans to graph\n for idx in idxs:\n entity = entities[idx]\n success = graph.add_entity(start=entity[0], end=entity[1],\n label=vocabs['entity_type']['O'],\n gold=False)\n n_add += success\n if num is not None and n_add >= num:\n break"
},
{
"identifier": "graph_add_fake_trigger",
"path": "TextEE/models/DyGIEpp/util.py",
"snippet": "def graph_add_fake_trigger(triggers, graph, vocabs, num=None):\n idxs = np.arange(len(triggers))\n np.random.shuffle(idxs)\n n_add = 0\n # add fake trigger spans to graph\n for idx in idxs:\n trigger = triggers[idx]\n success = graph.add_trigger(start=trigger[0], end=trigger[1],\n label=vocabs['event_type']['O'],\n gold=False)\n n_add += success\n if num is not None and n_add >= num:\n break"
}
] | import copy, json, logging
import numpy as np
import torch
import ipdb
from torch.utils.data import Dataset
from collections import Counter, namedtuple, defaultdict
from itertools import combinations
from .graph import Graph
from .util import enumerate_spans, graph_add_fake_entity, graph_add_fake_trigger | 8,911 | @property
def relation_type_set(self):
type_set = set()
for inst in self.data:
for relation in inst.get('relation_mentions', []):
type_set.add(relation['relation_type'])
return type_set
@property
def role_type_set(self):
type_set = set()
for inst in self.data:
for event in inst['event_mentions']:
for arg in event['arguments']:
type_set.add(arg['role'])
return type_set
def load_data(self):
overlength_num = 0
for inst in self.raw_data:
## added
pieces = [self.tokenizer.tokenize(t, is_split_into_words=True) for t in inst['tokens']]
token_lens = [len(x) for x in pieces]
if 0 in token_lens:
raise ValueError
pieces = [p for ps in pieces for p in ps]
inst['pieces'] = pieces
inst['token_lens'] = token_lens
inst['entity_mentions'] = inst['extra_info']['entity_mentions']
inst['relation_mentions'] = inst['extra_info']['relation_mentions']
inst['event_mentions'] = inst['extra_info']['event_mentions']
##
if not self.test:
if self.max_length != -1 and len(pieces) > self.max_length - 2:
overlength_num += 1
continue
else:
if len(pieces) > self.max_length - 2:
# add token_lens until over-length
piece_counter = 0
for max_token_include, token_len in enumerate(inst['token_lens']):
if piece_counter + token_len >= self.max_length - 2:
logger.info('overlength during testing...')
break
else:
piece_counter += token_len
inst['pieces'] = inst['pieces'][:piece_counter]
inst['token_lens'] = inst['token_lens'][:max_token_include]
inst['tokens'] = inst['tokens'][:max_token_include]
self.data.append(inst)
if overlength_num:
logger.info('Discarded {} overlength instances'.format(overlength_num))
logger.info('Loaded {} DyGIEpp instances from {} E2E instances'.format(len(self), len(self.raw_data)))
def numberize(self, vocabs):
"""Numberize word pieces, labels, etcs.
:param tokenizer: Bert tokenizer.
:param vocabs (dict): a dict of vocabularies.
"""
entity_type_stoi = vocabs.get('entity_type', None)
event_type_stoi = vocabs.get('event_type', None)
relation_type_stoi = vocabs.get('relation_type', None)
role_type_stoi = vocabs.get('role_type', None)
data = []
for inst in self.data:
doc_id = inst['doc_id']
tokens = inst['tokens']
pieces = inst['pieces']
wnd_id = inst['wnd_id']
token_num = len(tokens)
token_lens = inst['token_lens']
entities = inst['entity_mentions']
entities.sort(key=lambda x: x['start'])
events = inst['event_mentions']
# events = clean_events(events)
events.sort(key=lambda x: x['trigger']['start'])
# Pad word pieces with special tokens
piece_idxs = self.tokenizer.encode(pieces,
add_special_tokens=True,
max_length=self.max_length,
truncation=True)
if sum(token_lens) < self.max_length -2:
assert sum(token_lens) +2 == len(piece_idxs)
pad_num = self.max_length - len(piece_idxs)
attn_mask = [1] * len(piece_idxs) + [0] * pad_num
pad_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)
piece_idxs = piece_idxs + [pad_id] * pad_num
entity_list = [(e['start'], e['end'],
entity_type_stoi[e.get('entity_type', "UNK")])
for e in entities]
trigger_list = [(e['trigger']['start'], e['trigger']['end'],
event_type_stoi[e['event_type']])
for e in events]
# Argument role
role_list = get_role_list(entities, events, role_type_stoi)
# Relations
relation_list = get_relation_list(entities, inst.get('relation_mentions', []),relation_type_stoi)
# Graph
graph = Graph(
entities=entity_list,
triggers=trigger_list,
relations=relation_list,
roles=role_list,
vocabs=vocabs,
gold=True
)
# Add other span from span enumeration
|
logger = logging.getLogger(__name__)
instance_fields = [
'doc_id', 'wnd_id', 'tokens', 'pieces', 'piece_idxs',
'token_lens', 'attention_mask', 'graph', 'trigger_list'
]
batch_fields = [
'doc_ids', 'wnd_ids', 'tokens', 'pieces', 'piece_idxs',
'token_lens', 'attention_masks', 'graphs', 'token_nums',
]
Instance = namedtuple('Instance', field_names=instance_fields,
defaults=[None] * len(instance_fields))
Batch = namedtuple('Batch', field_names=batch_fields,
defaults=[None] * len(batch_fields))
def preprocess_entity(entities):
"""
We prevent the situation that there are more than 1 types for exactly same span
"""
span_map = []
entities_ = []
delete_num = 0
for ent in entities:
if (ent['start'], ent['end']) not in span_map:
entities_.append(ent)
span_map.append((ent['start'], ent['end']))
else:
delete_num += 1
if delete_num:
logger.info('remove {} entities due to span duplication'.format(delete_num))
return entities_
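# --- Editor-added illustration (not part of the original file) ---------------
# preprocess_entity keeps only the first mention seen for each (start, end)
# span and logs how many duplicates were dropped. Hypothetical example:
#   preprocess_entity([{'id': 'e1', 'start': 0, 'end': 2, 'entity_type': 'PER'},
#                      {'id': 'e2', 'start': 0, 'end': 2, 'entity_type': 'ORG'}])
# returns only the 'e1' mention and logs "remove 1 entities due to span duplication".
# ------------------------------------------------------------------------------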
def get_relation_list(entities, relations, vocab,
directional=True, symmetric=None):
entity_idxs = {entity['id']: (i,entity) for i, entity in enumerate(entities)}
visited = [[0] * len(entities) for _ in range(len(entities))]
relation_list = []
for relation in relations:
arg_1 = arg_2 = None
for arg in relation['arguments']:
if arg['role'] == 'Arg-1':
arg_1 = entity_idxs[arg['entity_id']]
elif arg['role'] == 'Arg-2':
arg_2 = entity_idxs[arg['entity_id']]
if arg_1 is None or arg_2 is None:
continue
relation_type = relation['relation_type']
if (not directional and arg_1[0] > arg_2[0]) or \
(directional and symmetric and (relation_type in symmetric) and (arg_1[0] > arg_2[0])):
arg_1, arg_2 = arg_2, arg_1
if visited[arg_1[0]][arg_2[0]] == 0:
# TODO (I-Hung): This will automatically remove multi relation
# scenario, but we first stick to this setup
temp = ((arg_1[1]['start'], arg_1[1]['end'], arg_1[1].get('entity_type', 'UNK')),
(arg_2[1]['start'], arg_2[1]['end'], arg_2[1].get('entity_type', 'UNK')),
vocab[relation_type])
relation_list.append(temp)
if not directional:
                temp = ((arg_2[1]['start'], arg_2[1]['end'], arg_2[1].get('entity_type', 'UNK')),
                        (arg_1[1]['start'], arg_1[1]['end'], arg_1[1].get('entity_type', 'UNK')),
vocab[relation_type])
relation_list.append(temp)
visited[arg_2[0]][arg_1[0]] = 1
visited[arg_1[0]][arg_2[0]] = 1
relation_list.sort(key=lambda x: (x[0][0], x[1][0]))
return relation_list
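# --- Editor-added illustration (not part of the original file) ---------------
# Each element of the returned relation_list is a 3-tuple:
#   ((arg1_start, arg1_end, arg1_entity_type),
#    (arg2_start, arg2_end, arg2_entity_type),
#    relation_label_id)
# Hypothetical example with vocab = {'ORG-AFF': 1}:
#   entities  = [{'id': 'e1', 'start': 0, 'end': 1, 'entity_type': 'PER'},
#                {'id': 'e2', 'start': 3, 'end': 4, 'entity_type': 'ORG'}]
#   relations = [{'relation_type': 'ORG-AFF',
#                 'arguments': [{'role': 'Arg-1', 'entity_id': 'e1'},
#                               {'role': 'Arg-2', 'entity_id': 'e2'}]}]
#   get_relation_list(entities, relations, vocab) == [((0, 1, 'PER'), (3, 4, 'ORG'), 1)]
# ------------------------------------------------------------------------------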
def get_role_list(entities, events, vocab):
entity_idxs = {entity['id']: (i,entity) for i, entity in enumerate(entities)}
visited = [[0] * len(entities) for _ in range(len(events))]
role_list = []
cnt = 0
for i, event in enumerate(events):
for arg in event['arguments']:
entity_idx = entity_idxs[arg['entity_id']]
if visited[i][entity_idx[0]] == 0 and arg['role'] in vocab:
# TODO (I-Hung): This will automatically remove multi role
# scenario, but we first stick to this setup
temp = ((event['trigger']['start'], event['trigger']['end'], event['event_type']),
(entity_idx[1]['start'], entity_idx[1]['end'], entity_idx[1].get('entity_type', 'UNK')),
vocab[arg['role']])
role_list.append(temp)
visited[i][entity_idx[0]] = 1
else:
cnt += 1
role_list.sort(key=lambda x: (x[0][0], x[1][0]))
if cnt:
logger.info('{} times of role are removed in gold because of span duplication'.format(cnt))
return role_list
def clean_events(events):
cleaned_map = {}
for event in events:
key = (event['trigger']['start'], event['trigger']['end'], event['event_type'], event['trigger']['text'])
if key in cleaned_map:
# do argument merging
cleaned_map[key]['arguments'].extend(event['arguments'])
else:
cleaned_map[key] = event
return list(cleaned_map.values())
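# --- Editor-added note (not part of the original file) -----------------------
# clean_events merges event mentions that share the same trigger span, event
# type, and trigger text by concatenating their argument lists into a single
# event; its call in IEDataset.numberize is currently commented out.
# ------------------------------------------------------------------------------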
class IEDataset(Dataset):
def __init__(self, raw_data, tokenizer, config, max_length=128, test=False):
self.raw_data = raw_data
self.tokenizer = tokenizer
self.data = []
self.max_length = max_length
self.test=test
self.max_entity_span = config.max_entity_span
self.min_entity_span = config.min_entity_span
self.max_trigger_span = config.max_trigger_span
self.min_trigger_span = config.min_trigger_span
self.load_data()
def __len__(self):
return len(self.data)
def __getitem__(self, item):
return self.data[item]
@property
def entity_type_set(self):
type_set = set()
for inst in self.data:
for entity in inst['entity_mentions']:
type_set.add(entity.get('entity_type', "UNK"))
return type_set
@property
def event_type_set(self):
type_set = set()
for inst in self.data:
for event in inst['event_mentions']:
type_set.add(event['event_type'])
return type_set
@property
def relation_type_set(self):
type_set = set()
for inst in self.data:
for relation in inst.get('relation_mentions', []):
type_set.add(relation['relation_type'])
return type_set
@property
def role_type_set(self):
type_set = set()
for inst in self.data:
for event in inst['event_mentions']:
for arg in event['arguments']:
type_set.add(arg['role'])
return type_set
def load_data(self):
overlength_num = 0
for inst in self.raw_data:
## added
pieces = [self.tokenizer.tokenize(t, is_split_into_words=True) for t in inst['tokens']]
token_lens = [len(x) for x in pieces]
if 0 in token_lens:
raise ValueError
pieces = [p for ps in pieces for p in ps]
inst['pieces'] = pieces
inst['token_lens'] = token_lens
inst['entity_mentions'] = inst['extra_info']['entity_mentions']
inst['relation_mentions'] = inst['extra_info']['relation_mentions']
inst['event_mentions'] = inst['extra_info']['event_mentions']
##
if not self.test:
if self.max_length != -1 and len(pieces) > self.max_length - 2:
overlength_num += 1
continue
else:
if len(pieces) > self.max_length - 2:
# add token_lens until over-length
piece_counter = 0
for max_token_include, token_len in enumerate(inst['token_lens']):
if piece_counter + token_len >= self.max_length - 2:
logger.info('overlength during testing...')
break
else:
piece_counter += token_len
inst['pieces'] = inst['pieces'][:piece_counter]
inst['token_lens'] = inst['token_lens'][:max_token_include]
inst['tokens'] = inst['tokens'][:max_token_include]
self.data.append(inst)
if overlength_num:
logger.info('Discarded {} overlength instances'.format(overlength_num))
logger.info('Loaded {} DyGIEpp instances from {} E2E instances'.format(len(self), len(self.raw_data)))
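    # --- Editor-added note (not part of the original file) -------------------
    # At test time, load_data trims over-length instances instead of discarding
    # them: it accumulates word-piece counts token by token and stops once
    # adding the next token would reach max_length - 2 (reserving room for the
    # two special tokens). Hypothetical example: with max_length = 8 and
    # token_lens = [2, 3, 4], only the first two tokens (5 pieces) are kept.
    # --------------------------------------------------------------------------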
def numberize(self, vocabs):
"""Numberize word pieces, labels, etcs.
:param tokenizer: Bert tokenizer.
:param vocabs (dict): a dict of vocabularies.
"""
entity_type_stoi = vocabs.get('entity_type', None)
event_type_stoi = vocabs.get('event_type', None)
relation_type_stoi = vocabs.get('relation_type', None)
role_type_stoi = vocabs.get('role_type', None)
data = []
for inst in self.data:
doc_id = inst['doc_id']
tokens = inst['tokens']
pieces = inst['pieces']
wnd_id = inst['wnd_id']
token_num = len(tokens)
token_lens = inst['token_lens']
entities = inst['entity_mentions']
entities.sort(key=lambda x: x['start'])
events = inst['event_mentions']
# events = clean_events(events)
events.sort(key=lambda x: x['trigger']['start'])
# Pad word pieces with special tokens
piece_idxs = self.tokenizer.encode(pieces,
add_special_tokens=True,
max_length=self.max_length,
truncation=True)
if sum(token_lens) < self.max_length -2:
assert sum(token_lens) +2 == len(piece_idxs)
pad_num = self.max_length - len(piece_idxs)
attn_mask = [1] * len(piece_idxs) + [0] * pad_num
pad_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)
piece_idxs = piece_idxs + [pad_id] * pad_num
entity_list = [(e['start'], e['end'],
entity_type_stoi[e.get('entity_type', "UNK")])
for e in entities]
trigger_list = [(e['trigger']['start'], e['trigger']['end'],
event_type_stoi[e['event_type']])
for e in events]
# Argument role
role_list = get_role_list(entities, events, role_type_stoi)
# Relations
relation_list = get_relation_list(entities, inst.get('relation_mentions', []),relation_type_stoi)
# Graph
graph = Graph(
entities=entity_list,
triggers=trigger_list,
relations=relation_list,
roles=role_list,
vocabs=vocabs,
gold=True
)
# Add other span from span enumeration | entity_spans = enumerate_spans(tokens, offset=0, | 1 | 2023-11-15 21:32:56+00:00 | 12k |
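Editor's note (illustrative, not part of the dataset rows): each pipe-delimited row in this dump appears to carry repository and file identifiers, a list of context snippets, the file's import block, a token count, a cropped code window, the full code up to the prediction point, and then the ground-truth next line, a gold snippet index, a timestamp, and a size bucket. A minimal, hedged sketch of turning one parsed row into a next-line completion example follows; the function name and dict keys are assumptions for illustration only.

# Hypothetical sketch only: field semantics are inferred from the row contents.
def to_completion_example(row: dict) -> tuple[str, str]:
    # Prompt = import block + cropped code window; target = the line the model should produce next.
    prompt = row["import_statement"] + "\n" + row["cropped_code"]
    target = row["next_line"]
    return prompt, target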
maagic6/SDIMV | SDIMV.py | [
{
"identifier": "imageProcess",
"path": "image.py",
"snippet": "class imageProcess:\n def __init__(self, fn):\n ft = filetype.guess(fn)\n self.data = {\"prompt\": \"\", \n \"negative_prompt\": \"\", \n \"steps\": \"\", \n \"sampler\": \"\", \n \"cfg_scale\": \"\", \n \"seed\": \"\", \n \"size\": \"\",\n \"model_hash\": \"\",\n \"model\": \"\",\n \"lora\": \"\"}\n if ft == None:\n self.compatible = False\n elif ft.extension in ['png']:\n self.image = Image.open(fn)\n self.compatible = False\n\n if 'parameters' in self.image.info: #web ui\n self.info = str(self.image.info['parameters'])\n self.metadataType = 'parameters'\n self.compatible = True\n self.image.close()\n del self.image\n elif 'Comment' in self.image.info: #novelai\n self.info = json.loads(self.image.info['Comment'])\n self.metadataType = 'comment'\n self.compatible = True\n self.image.close()\n self.image = None\n del self.image\n elif 'prompt' in self.image.info: #comfyui\n self.info = json.loads(self.image.info['prompt'])\n self.metadataType = 'prompt'\n self.compatible = True\n self.image.close()\n self.image = None\n del self.image\n elif ft.extension == 'jpg':\n self.image = Image.open(fn)\n self.compatible = False\n exif_data = self.image._getexif()\n\n if exif_data is not None:\n for tag, value in ExifTags.TAGS.items():\n if tag in exif_data:\n if ExifTags.TAGS[tag] == \"UserComment\":\n user_comment = exif_data[tag]\n user_comment_unicode = user_comment.decode(\"utf-8\") #decode\n user_comment_unicode_sanitized = user_comment_unicode.replace('UNICODE', '').replace('\\x00', '')\n self.info = user_comment_unicode_sanitized\n self.metadataType = 'parameters'\n self.compatible = True\n \n self.image.close()\n self.image = None\n del self.image\n elif ft.extension in ['mp4']:\n video = MP4(fn)\n self.data = {}\n try:\n if '\\xa9cmt' in video.tags:\n metadata = video.tags['\\xa9cmt']\n self.metadataType = \"video\"\n self.info = json.loads(metadata[0])\n self.compatible = True\n video = None\n del video\n else:\n self.compatible = False\n video = None\n del video\n except:\n self.compatible = False\n video = None\n del video\n else:\n self.compatible = False\n \n def findKeyName(self, data, keys):\n if isinstance(data, dict):\n for key, value in data.items():\n if key == keys:\n return value\n result = self.findKeyName(value, keys)\n if result is not None:\n return result\n elif isinstance(data, list):\n for item in data:\n result = self.findKeyName(item, keys)\n if result is not None:\n return result \n return None\n\n def getInfo(self): # messy\n if self.metadataType == 'parameters':\n matches = re.findall(r'([^:,]+): ([^,]+)', self.info.replace('\\n', ','))\n for match in matches:\n key = match[0].strip().lower().replace(' ', '_')\n value = match[1].strip()\n self.data[key] = value\n try:\n positive = str(re.split(r'Negative prompt: |Steps: ', self.info)[0])\n except:\n positive = \"\"\n self.data[\"prompt\"]=positive\n try:\n negative = str(re.split(r'Negative prompt: ', self.info, maxsplit=1)[1].split('Steps:')[0].strip())\n except:\n negative = \"\"\n self.data[\"negative_prompt\"] = negative\n loraTags = re.findall(r'<lora:[^>]+>', self.info)\n uniqueLoraTags = set(loraTags)\n loraString = ' '.join(uniqueLoraTags)\n self.data[\"lora\"] = loraString\n if \"model\" not in self.data:\n self.data[\"model\"] = \"\"\n return self.data\n if self.metadataType == 'comment': #novelai\n self.data[\"prompt\"] = str(self.info[\"prompt\"])\n self.data[\"negative_prompt\"] = str(self.info[\"uc\"])\n self.data[\"steps\"] = str(self.info[\"steps\"])\n self.data[\"sampler\"] = 
str(self.info[\"sampler\"])\n self.data[\"cfg_scale\"] = str(self.info[\"scale\"])\n self.data[\"seed\"] = str(self.info[\"seed\"])\n self.data[\"size\"] = str(self.info[\"height\"])+'x'+str(self.info[\"width\"])\n self.data[\"model\"] = ''\n self.data[\"model_hash\"] = ''\n loraTags = re.findall(r'<lora:[^>]+>', str(self.info))\n uniqueLoraTags = set(loraTags)\n loraString = ' '.join(uniqueLoraTags)\n self.data[\"lora\"] = loraString\n return self.data\n if self.metadataType == 'video':\n promptKey = self.findKeyName(data=self.info, keys=\"positive\")[0]\n self.data[\"prompt\"] = self.info.get('prompt', {}).get(f'{promptKey}', {}).get('inputs', {}).get('text')\n self.data[\"negative_prompt\"] = self.info.get('prompt', {}).get('320', {}).get('inputs', {}).get('text')\n self.data[\"steps\"] = self.info.get('prompt', {}).get('528', {}).get('inputs', {}).get('steps')\n self.data[\"sampler\"] = self.info.get('prompt', {}).get('528', {}).get('inputs', {}).get('sampler_name')\n self.data[\"cfg_scale\"] = self.info.get('prompt', {}).get('528', {}).get('inputs', {}).get('cfg')\n self.data[\"seed\"] = self.info.get('prompt', {}).get('528', {}).get('inputs', {}).get('seed')\n self.data[\"size\"] = str(self.info.get('prompt', {}).get('539', {}).get('inputs', {}).get('height'))+'x'+str(self.info.get('prompt', {}).get('539', {}).get('inputs', {}).get('width'))\n self.data[\"model\"] = self.info.get('prompt', {}).get('513', {}).get('inputs', {}).get('model_name')\n self.data[\"model_hash\"] = \"\"\n self.data[\"lora\"] = \"\"\n return self.data\n if self.metadataType == 'prompt':\n promptKey = self.findKeyName(data=self.info, keys=\"positive\")\n if type(promptKey) == list:\n promptKey = self.findKeyName(data=self.info, keys=\"positive\")[0]\n if \"pre_text\" in self.info.get(f'{promptKey}', {}).get('inputs', {}):\n self.data[\"prompt\"] = re.search(r'([^--]+)--neg', str(self.info[f'{promptKey}']['inputs']['pre_text'])).group(1)\n else:\n self.data[\"prompt\"] = self.info[f'{promptKey}']['inputs'].get('text', None)\n else:\n self.data[\"prompt\"] = promptKey\n negativePromptKey = self.findKeyName(data=self.info, keys=\"negative\")\n if type(negativePromptKey) == list:\n negativePromptKey = self.findKeyName(data=self.info, keys=\"negative\")[0]\n if \"pre_text\" in self.info.get(f'{promptKey}', {}).get('inputs', {}):\n self.data[\"negative_prompt\"] = re.search(r'--neg\\s*([^\\n]+)', str(self.info[f'{negativePromptKey}']['inputs']['pre_text'])).group(1).strip() #I hate ComfyUI\n else:\n self.data[\"negative_prompt\"] = self.info[f'{negativePromptKey}']['inputs'].get('text', None)\n #self.data[\"negative_prompt\"] = type(negativePromptKey)\n else:\n self.data[\"negative_prompt\"] = negativePromptKey\n self.data[\"steps\"] = self.findKeyName(data=self.info, keys=\"steps\")\n self.data[\"sampler\"] = self.findKeyName(data=self.info, keys=\"sampler_name\")\n self.data[\"cfg_scale\"] = self.findKeyName(data=self.info, keys=\"cfg\")\n self.data[\"seed\"] = self.findKeyName(data=self.info, keys=\"noise_seed\") or self.findKeyName(data=self.info, keys=\"seed\")\n self.data[\"size\"] = self.findKeyName(data=self.info, keys=\"resolution\")\n self.data[\"model\"] = self.findKeyName(data=self.info, keys=\"ckpt_name\")\n self.data[\"model_hash\"] = ''\n self.data[\"lora\"] = ''\n return self.data\n\n def save_metadata(self):\n #todo\n pass\n \n def getRaw(self):\n return self.info\n \n def positivePrompt(self):\n if self.compatible == False:\n return -1\n else:\n if self.metadataType == 'parameters':\n positive = 
\"\"\n return positive\n if self.metadataType == 'comment':\n positive = \"\"\n return positive\n\n '''def negativePrompt(self):\n if self.compatible == False:\n return -1 \n else:\n negative = \"\"\n return negative'''"
},
{
"identifier": "FileHandler",
"path": "file_handler.py",
"snippet": "class FileHandler:\n def __init__(self, main_window):\n self.main_window = main_window\n \n def downloadImage(self, url):\n try:\n response = requests.get(url.toString())\n if response.status_code == 200:\n # get the file extension from the content-type header\n url_filename = os.path.basename(unquote(url.toString()))\n invalid_characters = ['<', '>', ':', '\"', '/', '\\\\', '|', '?', '*']\n for char in invalid_characters:\n url_filename = url_filename.replace(char, '_')\n file_extension = response.headers.get('Content-Type').split('/')[-1]\n # create a unique filename in the current working directory\n filename = f\"{url_filename}.{file_extension}\"\n save_path = os.path.join(os.getcwd(), f\"{url_filename}.{file_extension}\")\n # save the image locally\n with open(filename, 'wb') as file:\n file.write(response.content)\n return save_path\n else:\n print(f\"Failed to download image. HTTP Status Code: {response.status_code}\")\n except Exception as e:\n print(f\"Error downloading image: {e}\")\n return None\n \n def copyTempImage(self, temp_file_path):\n try:\n # create a copy of the image file in the current working directory\n copied_path = os.path.join(os.getcwd(), os.path.basename(temp_file_path))\n shutil.copyfile(temp_file_path, copied_path)\n return copied_path\n except Exception as e:\n print(f\"Error copying temp image: {e}\")\n return None\n \n def openFileDialog(self):\n filenames, _ = QFileDialog.getOpenFileNames(\n self.main_window,\n \"Select image files\",\n \"\",\n \"Images and videos (*.png *.jpg *.gif *.webp *.mp4)\"\n )\n if filenames:\n new_files = []\n new_files = [filename for filename in filenames if not self.isFileInList(filename)]\n self.updateFileList(new_files)\n \n def updateFileList(self, file_paths):\n for file_path in file_paths:\n item = QListWidgetItem(file_path)\n self.main_window.fileList.addItem(item)\n\n if self.main_window.fileList.count() > 0:\n last_item = self.main_window.fileList.item(self.main_window.fileList.count() - 1)\n self.main_window.fileList.setCurrentItem(last_item)\n self.main_window.viewMetadata(last_item)\n else:\n self.main_window.viewMetadata(None)\n \n def clearFileList(self):\n self.main_window.fileList.clear()\n #self.main_window.imageScene.clear()\n self.main_window.selectedFile.clear()\n for _, widget, _ in self.main_window.widgetInfo:\n widget.clear()\n self.main_window.viewMetadata(None)\n\n def removeSelectedItem(self):\n selectedItem = self.main_window.fileList.currentItem()\n if selectedItem:\n selectedIndex = self.main_window.fileList.row(selectedItem)\n self.main_window.fileList.takeItem(selectedIndex)\n # if last index\n if selectedIndex == (self.main_window.fileList.count()):\n if self.main_window.fileList.count() > 0:\n last_item = self.main_window.fileList.item(self.main_window.fileList.count() - 1)\n self.main_window.fileList.setCurrentItem(last_item)\n self.main_window.viewMetadata(last_item)\n else:\n self.main_window.viewMetadata(None)\n else:\n self.main_window.viewMetadata(self.main_window.fileList.item(selectedIndex))\n\n def getFilesFromFolder(self, path):\n folder_path = Path(path)\n png_files = list(folder_path.rglob('*.[pP][nN][gG]'))\n jpg_files = list(folder_path.rglob('*.[jJ][pP][gG]'))\n webp_files = list(folder_path.rglob('*.[wW][eE][bB][pP]'))\n gif_files = list(folder_path.rglob('*.[gG][iI][fF]'))\n mp4_files = list(folder_path.rglob('*.[mM][pP][4]'))\n png_files = [str(file_path).replace('\\\\', '/') for file_path in png_files]\n jpg_files = [str(file_path).replace('\\\\', '/') for 
file_path in jpg_files]\n webp_files = [str(file_path).replace('\\\\', '/') for file_path in webp_files]\n gif_files = [str(file_path).replace('\\\\', '/') for file_path in gif_files]\n mp4_files = [str(file_path).replace('\\\\', '/') for file_path in mp4_files]\n image_files = set(png_files + jpg_files + webp_files + gif_files + mp4_files)\n unique_image_files = image_files\n\n return unique_image_files\n\n def isFileInList(self, file_path):\n for row in range(self.main_window.fileList.count()):\n item = self.main_window.fileList.item(row)\n if item.text() == file_path:\n return True\n return False\n\n def getFileList(self):\n return [self.main_window.fileList.item(row).text() for row in range(self.main_window.fileList.count())]"
},
{
"identifier": "CustomDockWidget",
"path": "custom_widgets.py",
"snippet": "class CustomDockWidget(QDockWidget):\n def __init__(self, main_window, parent=None):\n super().__init__(parent)\n self.setAcceptDrops(True)\n self.main_window = main_window\n\n def dragEnterEvent(self, event):\n self.main_window.dragEnterEvent(event)\n\n def dropEvent(self, event):\n self.main_window.dropEvent(event)"
},
{
"identifier": "CustomLineEdit",
"path": "custom_widgets.py",
"snippet": "class CustomLineEdit(QLineEdit):\n def keyPressEvent(self, event):\n if event.key() == Qt.Key.Key_Tab:\n self.focusNextPrevChild(True)\n else:\n super().keyPressEvent(event)"
},
{
"identifier": "CustomTextEdit",
"path": "custom_widgets.py",
"snippet": "class CustomTextEdit(QTextEdit):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.document().contentsChanged.connect(self.adjustSize)\n\n def adjustSize(self):\n document_height = self.document().size().height()\n current_height = self.height()\n if document_height != current_height:\n self.setFixedHeight(int(document_height) + 10 if document_height < 150 else 150)\n \n def showEvent(self, event):\n super().showEvent(event)\n self.adjustSize()\n\n def keyPressEvent(self, event):\n if event.key() == Qt.Key.Key_Tab:\n event.ignore()\n else:\n super().keyPressEvent(event)"
},
{
"identifier": "CustomListWidget",
"path": "custom_widgets.py",
"snippet": "class CustomListWidget(QListWidget):\n def wheelEvent(self, event: QWheelEvent):\n current_index = self.currentRow()\n total_items = self.count()\n if total_items == 0:\n return\n new_index = (current_index - 1) % total_items if event.angleDelta().y() > 0 else (current_index + 1) % total_items\n self.setCurrentRow(new_index)"
},
{
"identifier": "CustomTitleBar",
"path": "custom_widgets.py",
"snippet": "class CustomTitleBar(StandardTitleBar):\n def __init__(self, parent):\n super().__init__(parent)\n font=QFont(\"Segoe UI\", 10)\n self.minBtn.setHoverColor(Qt.GlobalColor.white)\n self.minBtn.setHoverBackgroundColor(QColor(0, 100, 182))\n self.minBtn.setPressedColor(Qt.GlobalColor.white)\n self.minBtn.setPressedBackgroundColor(QColor(54, 57, 65))\n self.minBtn.setFocusPolicy(Qt.FocusPolicy.NoFocus)\n self.maxBtn.setFocusPolicy(Qt.FocusPolicy.NoFocus)\n self.closeBtn.setFocusPolicy(Qt.FocusPolicy.NoFocus)\n self.titleLabel.setFont(font)\n self.titleLabel.setStyleSheet(\"\"\"\n QFont {\n font: Segoe UI,\n font_size: 10\n }\n \"\"\")"
},
{
"identifier": "ZoomableGraphicsView",
"path": "custom_widgets.py",
"snippet": "class ZoomableGraphicsView(QGraphicsView):\n def __init__(self, parent=None):\n super(ZoomableGraphicsView, self).__init__(parent)\n self.current_zoom = 1.0\n self.minimum_zoom = 0.1\n self.maximum_zoom = 25.0\n\n def wheelEvent(self, event: QWheelEvent):\n event.accept()\n factor = 1.2 if event.angleDelta().y() > 0 else 1.0 / 1.2\n new_zoom = self.current_zoom * factor\n new_zoom = max(self.minimum_zoom, min(self.maximum_zoom, new_zoom))\n scale_factor = new_zoom / self.current_zoom\n self.current_zoom = new_zoom\n self.scale(scale_factor, scale_factor)\n \n def resetZoom(self):\n self.current_zoom = 1.0"
},
{
"identifier": "resource_path",
"path": "icon.py",
"snippet": "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)"
},
{
"identifier": "AboutDialog",
"path": "about_dialog.py",
"snippet": "class AboutDialog(FramelessDialog):\n def __init__(self, main_window):\n super().__init__()\n self.main_window = main_window\n layout = QVBoxLayout(self)\n hlayout = QHBoxLayout()\n vlayout = QVBoxLayout()\n vlayout.setSpacing(0)\n vlayout.setContentsMargins(0, 0, 0, 0)\n logo = QLabel()\n icon = resource_path(\"icon/icon.ico\")\n pixmap = QPixmap(icon)\n pixmap = pixmap.scaledToWidth(80)\n logo.setPixmap(pixmap)\n font = QFont()\n font.setBold(True)\n font.setPointSize(16)\n title = QLabel(\"SDIMV\")\n title.setFont(font)\n githubLink = QLabel('<a href=\"https://github.com/maagic6/SDIMV\">GitHub</a>')\n githubLink.setOpenExternalLinks(True)\n vlayout.addWidget(title, alignment=Qt.AlignmentFlag.AlignTop)\n vlayout.addWidget(QLabel(\"v1.2.1\"), alignment=Qt.AlignmentFlag.AlignTop)\n vlayout.addWidget(githubLink, alignment=Qt.AlignmentFlag.AlignBottom)\n hlayout.addWidget(logo)\n hlayout.addLayout(vlayout)\n layout.addLayout(hlayout)\n self.setFixedSize(240,120)\n self.setContentsMargins(0,0,35,0)\n self.setWindowTitle(\"About\")\n \n def closeEvent(self, event):\n self.main_window.setEnabled(True)\n self.deleteLater()\n event.accept()\n\n def showEvent(self, event):\n main_window_center = self.main_window.geometry().center()\n self.move(main_window_center - self.rect().center())\n super().showEvent(event)"
}
] | import sys, subprocess, qdarkstyle
from PyQt6.QtWidgets import (
QApplication,
QFrame,
QGraphicsPixmapItem,
QGraphicsScene,
QGraphicsView,
QGridLayout,
QLabel,
QLineEdit,
QMenu,
QToolBar,
QVBoxLayout,
QHBoxLayout,
QWidget,
QPushButton,
QScrollArea,
QDockWidget,
QMessageBox,
)
from PyQt6.QtGui import QIcon, QAction, QFont, QPainter, QMovie, QPixmap, QDesktopServices
from PyQt6.QtCore import Qt, QRectF, QEvent, QUrl, QSettings, QSystemSemaphore, QSharedMemory
from PyQt6.QtMultimedia import QMediaPlayer
from PyQt6.QtMultimediaWidgets import QGraphicsVideoItem
from pathlib import Path
from qframelesswindow import FramelessMainWindow
from image import imageProcess
from file_handler import FileHandler
from custom_widgets import CustomDockWidget, CustomLineEdit, CustomTextEdit, CustomListWidget, CustomTitleBar, ZoomableGraphicsView
from icon import resource_path
from about_dialog import AboutDialog | 7,447 | self.imageScene.removeItem(self.video_item)
self.media_player.deleteLater()
self.video_item.deleteLater()
self.isMediaPlayerDeleted = True
#del self.media_player
#del self.video_item
except Exception as e:
print(f"Exception when disconnecting media player: {e}")
self.imageScene.clear()
def dragEnterEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
for url in mime_data.urls():
if url.isLocalFile():
file_path = url.toLocalFile()
if Path(file_path).is_dir() or Path(file_path).suffix.lower() in ['.png', '.gif', '.webp', '.mp4', '.jpg']:
# accept local files
event.acceptProposedAction()
return
elif url.scheme() in ('http', 'https'):
# accept image links
event.acceptProposedAction()
return
def dropEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
new_files = []
for url in mime_data.urls():
if url.isLocalFile():
file_path = url.toLocalFile()
if Path(file_path).is_dir():
new_files.extend(self.fileHandler.getFilesFromFolder(file_path))
elif 'Temp' in Path(file_path).parts:
copied_path = self.fileHandler.copyTempImage(file_path)
new_files.append(copied_path)
else:
new_files.append(file_path)
elif url.scheme() == 'http' or url.scheme() == 'https':
downloaded_path = self.fileHandler.downloadImage(url)
if downloaded_path and not self.fileHandler.isFileInList(downloaded_path):
new_files.append(downloaded_path)
new_files = [file_path for file_path in new_files if not self.fileHandler.isFileInList(file_path)]
self.fileHandler.updateFileList(new_files)
event.acceptProposedAction()
def handleItemSelectionChanged(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
self.viewMetadata(selectedItem)
#if 0 <= selectedIndex < len(self.selectedFiles):
#self.viewMetadata(selectedItem)
def updateImageView(self):
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
def updateVideoView(self):
self.imageView.resetTransform()
self.imageScene.setSceneRect(self.video_item.boundingRect())
self.imageView.setScene(self.imageScene)
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
def saveSettings(self):
file_paths = self.fileHandler.getFileList()
self.settings.setValue("selectedFiles", file_paths)
self.settings.setValue("main_window_state", self.saveState())
self.settings.setValue("main_window_geometry", self.saveGeometry())
def loadSettings(self):
file_paths = self.settings.value("selectedFiles", [])
self.fileHandler.updateFileList(file_paths)
if self.settings.value("main_window_state"):
self.restoreState(self.settings.value("main_window_state"))
def closeEvent(self, event):
self.saveSettings()
event.accept()
def eventFilter(self, obj, event):
if obj == self:
if event.type() == QEvent.Type.Resize:
self.updateImageView()
if obj in (self.fileListWidget, self.imageViewWidget):
if event.type() == QEvent.Type.Move:
self.updateImageView()
return super(MainWindow, self).eventFilter(obj, event)
def showContextMenu(self, event):
menu = QMenu(self)
view_action = QAction("View", self)
view_action.triggered.connect(self.openImage)
openfolder_action = QAction("Open folder", self)
openfolder_action.triggered.connect(self.openFolder)
remove_action = QAction("Remove", self)
remove_action.triggered.connect(self.fileHandler.removeSelectedItem)
menu.addAction(view_action)
menu.addAction(openfolder_action)
menu.addAction(remove_action)
menu.exec(self.fileList.mapToGlobal(event))
def openFolder(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
selectedFile = selectedItem.text()
folder_path = Path(selectedFile).parent
QDesktopServices.openUrl(QUrl.fromLocalFile(str(folder_path)))
def openImage(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
selectedFile = selectedItem.text()
subprocess.run(['start', '', selectedFile], shell=True)
def showAboutDialog(self):
self.setEnabled(False)
|
class MainWindow(FramelessMainWindow):
def __init__(self):
super().__init__()
self.fileHandler = FileHandler(self)
#window size
self.setTitleBar(CustomTitleBar(self))
self.setWindowTitle('SDIMV')
self.titleBar.raise_()
self.settings = QSettings("maagic6", "SDIMV")
savedGeometry = self.settings.value("main_window_geometry")
if savedGeometry is not None:
self.restoreGeometry(savedGeometry)
else:
self.resize(720,720)
qr = self.frameGeometry()
cp = self.screen().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
iconPath = resource_path("icon/icon.ico")
self.setWindowIcon(QIcon(iconPath))
toolbar = QToolBar("Toolbar")
toolbar.setStyleSheet("QToolBar {background: transparent;}"
"QToolButton {background: transparent; border: none;}"
"QToolButton:hover {background: rgba(195, 195, 255, 50);}")
iconPath2 = resource_path("icon/add.png")
iconPath3 = resource_path("icon/remove.png")
iconPath4 = resource_path("icon/clear.png")
iconPath5 = resource_path("icon/about.png")
addAction = QAction(QIcon(iconPath2), "Add", self)
addAction.triggered.connect(self.fileHandler.openFileDialog)
removeAction = QAction(QIcon(iconPath3), "Remove", self)
removeAction.triggered.connect(self.fileHandler.removeSelectedItem)
clearAction = QAction(QIcon(iconPath4), "Clear", self)
clearAction.triggered.connect(self.fileHandler.clearFileList)
aboutAction = QAction(QIcon(iconPath5), "About", self)
aboutAction.triggered.connect(self.showAboutDialog)
toolbar.addAction(addAction)
toolbar.addAction(removeAction)
toolbar.addAction(clearAction)
toolbar.addAction(aboutAction)
toolbar.setObjectName("Toolbar")
self.addToolBar(toolbar)
self.imagePreviewFrame = QFrame()
self.imagePreviewFrame.setFrameShape(QFrame.Shape.Box)
self.imagePreviewFrame.setLineWidth(1)
self.imagePreviewFrame.setFocusPolicy(Qt.FocusPolicy.NoFocus)
self.imageFrame = QVBoxLayout()
self.imagePreviewFrame.setLayout(self.imageFrame)
self.imageScene = QGraphicsScene()
self.imageView = ZoomableGraphicsView(self.imageScene)
self.imageView.setRenderHint(QPainter.RenderHint.Antialiasing, True)
self.imageView.setDragMode(QGraphicsView.DragMode.ScrollHandDrag)
self.imageView.setFocusPolicy(Qt.FocusPolicy.NoFocus)
self.imageFrame.addWidget(self.imageView)
self.fileList = CustomListWidget()
self.fileList.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)
self.fileList.customContextMenuRequested.connect(self.showContextMenu)
self.fileList.itemSelectionChanged.connect(self.handleItemSelectionChanged)
self.selectedFile = QLineEdit()
self.browseButton = QPushButton('Browse')
self.browseButton.clicked.connect(self.fileHandler.openFileDialog)
self.browseButton.setFocusPolicy(Qt.FocusPolicy.NoFocus)
self.clearButton = QPushButton('Clear')
self.clearButton.clicked.connect(self.fileHandler.clearFileList)
self.clearButton.setFocusPolicy(Qt.FocusPolicy.NoFocus)
bottomHalf = QScrollArea(self)
bottomHalf.setWidgetResizable(True)
scrollContent = QWidget()
self.gridLayout = QGridLayout(scrollContent)
bottomHalf.setWidget(scrollContent)
self.gridLayout.addWidget(QLabel('Selected file:'), 3, 0)
self.gridLayout.addWidget(self.selectedFile, 4, 0, 1, 5)
self.widgetInfo = [
('Positive prompt:', CustomTextEdit(), 'prompt'),
('Negative prompt:', CustomTextEdit(), 'negative_prompt'),
('Steps:', CustomLineEdit(), 'steps'),
('Sampler:', CustomLineEdit(), 'sampler'),
('CFG scale:', CustomLineEdit(), 'cfg_scale'),
('Seed:', CustomLineEdit(), 'seed'),
('Size:', CustomLineEdit(), 'size'),
('Model hash:', CustomLineEdit(), 'model_hash'),
('Model:', CustomLineEdit(), 'model'),
('LoRA:', CustomLineEdit(), 'lora'),
('Raw:', CustomTextEdit(), 'raw')
]
for row, (label_text, widget, widget_name) in enumerate(self.widgetInfo):
label = QLabel(label_text)
setattr(self, widget_name + "_label", label)
setattr(self, widget_name, widget)
self.gridLayout.addWidget(label, 2*row+5, 0, 1, 5)
self.gridLayout.addWidget(widget, 2*row+5+1, 0, 1, 5)
# set stretch factors
self.gridLayout.setColumnStretch(0, 1)
self.gridLayout.setColumnStretch(1, 1)
self.gridLayout.setColumnStretch(2, 1)
self.gridLayout.setColumnStretch(3, 1)
self.gridLayout.setColumnStretch(4, 1)
bottomHalf.setMinimumHeight(1)
# set alignments
self.gridLayout.setAlignment(Qt.AlignmentFlag.AlignTop)
self.gridLayout.setAlignment(Qt.AlignmentFlag.AlignLeft)
self.fileListWidget = CustomDockWidget(self)
self.fileListWidget.setObjectName("FileListWidget")
titleBarWidget = QWidget(self)
titleBarLayout = QHBoxLayout(titleBarWidget)
titleLabel = QLabel("File list")
titleBarLayout.addWidget(titleLabel, alignment=Qt.AlignmentFlag.AlignHCenter)
titleBarLayout.addStretch()
titleBarLayout.addWidget(self.browseButton, alignment=Qt.AlignmentFlag.AlignHCenter)
titleBarLayout.addWidget(self.clearButton, alignment=Qt.AlignmentFlag.AlignHCenter)
titleBarWidget.setMaximumHeight(10)
self.fileListWidget.setWidget(self.fileList)
self.fileListWidget.setWindowTitle("File list")
self.fileList.setAcceptDrops(True)
self.fileListWidget.setTitleBarWidget(titleLabel)
self.fileListWidget.setAcceptDrops(True)
self.addDockWidget(Qt.DockWidgetArea.LeftDockWidgetArea, self.fileListWidget)
self.imageViewWidget = QDockWidget()
self.imageViewWidget.setObjectName("ImageViewWidget")
self.imageViewWidget.setWidget(self.imagePreviewFrame)
self.imageViewWidget.setFeatures(QDockWidget.DockWidgetFeature.NoDockWidgetFeatures)
self.imagePreviewFrame.setAcceptDrops(True)
self.imageView.setAcceptDrops(True)
self.imageViewWidget.setTitleBarWidget(QLabel("Image view"))
self.imageViewWidget.setAllowedAreas(Qt.DockWidgetArea.NoDockWidgetArea)
self.imageViewWidget.setAcceptDrops(True)
self.setCentralWidget(self.imageViewWidget)
self.metadataWidget = QDockWidget()
self.metadataWidget.setObjectName("MetadataWidget")
self.metadataWidget.setWidget(bottomHalf)
self.metadataWidget.setTitleBarWidget(QLabel("Metadata"))
self.metadataWidget.setWindowTitle("Metadata")
self.addDockWidget(Qt.DockWidgetArea.LeftDockWidgetArea, self.metadataWidget)
self.setContentsMargins(0,30,0,0)
self.isMediaPlayerDeleted = False
self.isMovieDeleted = False
self.fileListWidget.dockLocationChanged.connect(self.updateImageView)
self.metadataWidget.dockLocationChanged.connect(self.updateImageView)
self.fileListWidget.installEventFilter(self)
self.imageViewWidget.installEventFilter(self)
self.metadataWidget.installEventFilter(self)
self.installEventFilter(self)
# load settings
self.loadSettings()
# enable drop events
self.setAcceptDrops(True)
self.show()
if len(sys.argv) > 1:
new_files = []
for arg in sys.argv[1:]:
file_path = Path(arg)
if file_path.is_dir():
new_files.extend(self.fileHandler.getFilesFromFolder(file_path))
elif not self.fileHandler.isFileInList(str(file_path)):
new_files.append(str(file_path).replace('\\', '/'))
self.fileHandler.updateFileList(new_files)
def viewMetadata(self, item):
if item:
selectedFile = item.text()
self.selectedFile.setText(item.text())
if Path(selectedFile).exists():
if selectedFile.lower().endswith(('.gif','.webp')):
self.cleanup()
self.movie = QMovie(selectedFile)
#self.imageScene.clear()
self.pixmap_item = QGraphicsPixmapItem()
self.imageScene.addItem(self.pixmap_item)
self.isMovieDeleted = False
self.imageView.resetTransform()
self.movie.start()
self.movie.frameChanged.connect(lambda: self.pixmap_item.setPixmap(self.movie.currentPixmap()))
self.imageScene.setSceneRect(QRectF(self.movie.currentPixmap().rect()))
self.imageView.setScene(self.imageScene)
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
elif selectedFile.lower().endswith(('.png', '.jpg', '.jpeg','.bmp')):
self.cleanup()
pixmap = QPixmap(selectedFile)
#self.imageScene.clear()
self.imageScene.addPixmap(pixmap)
self.imageView.setScene(self.imageScene)
self.imageView.resetTransform()
self.imageScene.setSceneRect(QRectF(pixmap.rect()))
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
elif selectedFile.lower().endswith(('.mp4', '.mpeg4', '.avi')):
self.cleanup()
#self.imageScene.clear()
self.imageView.resetTransform()
self.media_player = QMediaPlayer()
self.video_item = QGraphicsVideoItem()
self.imageScene.addItem(self.video_item)
self.isMediaPlayerDeleted = False
self.media_player.setVideoOutput(self.video_item)
self.media_player.setSource(QUrl.fromLocalFile(selectedFile))
self.media_player.play()
self.media_player.mediaStatusChanged.connect(self.loopVideo)
self.video_item.nativeSizeChanged.connect(self.updateVideoView)
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio) #workaround
self.imageView.resetZoom()
with open(selectedFile, 'rb') as file:
image = imageProcess(file)
prompt = image.positivePrompt()
if prompt == -1:
for _, widget, _ in self.widgetInfo:
widget.setText('')
else:
data = image.getInfo()
for _, widget, key in self.widgetInfo:
if key == 'raw':
widget.setText(str(image.getRaw()))
else:
widget.setText(str(data[key]))
else:
self.cleanup()
#self.imageScene.clear()
self.selectedFile.clear()
for _, widget, _ in self.widgetInfo:
widget.clear()
self.fileHandler.removeSelectedItem()
else:
self.cleanup()
self.imageScene.clear()
self.selectedFile.clear()
for _, widget, _ in self.widgetInfo:
widget.clear()
def loopVideo(self, status):
if status == QMediaPlayer.MediaStatus.EndOfMedia:
self.media_player.setPosition(0)
self.media_player.play()
else:
pass
def cleanup(self):
if hasattr(self, 'movie') and self.movie is not None and self.isMovieDeleted == False:
try:
self.movie.frameChanged.disconnect()
self.movie.stop()
self.imageScene.removeItem(self.pixmap_item)
self.movie.deleteLater()
del self.movie
del self.pixmap_item
self.isMovieDeleted = True
except TypeError as e:
print(f"Exception when disconnecting movie: {e}")
if hasattr(self, 'media_player') and self.media_player is not None and self.isMediaPlayerDeleted == False:
try:
#self.media_player.setSource(QUrl())
self.media_player.mediaStatusChanged.disconnect()
self.media_player.stop()
self.imageScene.removeItem(self.video_item)
self.media_player.deleteLater()
self.video_item.deleteLater()
self.isMediaPlayerDeleted = True
#del self.media_player
#del self.video_item
except Exception as e:
print(f"Exception when disconnecting media player: {e}")
self.imageScene.clear()
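    # --- Editor-added note (not part of the original file) -------------------
    # cleanup() tears down whichever preview object is active (QMovie for
    # GIF/WebP, QMediaPlayer + QGraphicsVideoItem for video) before the scene
    # is cleared; the isMovieDeleted / isMediaPlayerDeleted flags guard against
    # disconnecting or deleting the same objects twice.
    # --------------------------------------------------------------------------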
def dragEnterEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
for url in mime_data.urls():
if url.isLocalFile():
file_path = url.toLocalFile()
if Path(file_path).is_dir() or Path(file_path).suffix.lower() in ['.png', '.gif', '.webp', '.mp4', '.jpg']:
# accept local files
event.acceptProposedAction()
return
elif url.scheme() in ('http', 'https'):
# accept image links
event.acceptProposedAction()
return
def dropEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
new_files = []
for url in mime_data.urls():
if url.isLocalFile():
file_path = url.toLocalFile()
if Path(file_path).is_dir():
new_files.extend(self.fileHandler.getFilesFromFolder(file_path))
elif 'Temp' in Path(file_path).parts:
copied_path = self.fileHandler.copyTempImage(file_path)
new_files.append(copied_path)
else:
new_files.append(file_path)
elif url.scheme() == 'http' or url.scheme() == 'https':
downloaded_path = self.fileHandler.downloadImage(url)
if downloaded_path and not self.fileHandler.isFileInList(downloaded_path):
new_files.append(downloaded_path)
new_files = [file_path for file_path in new_files if not self.fileHandler.isFileInList(file_path)]
self.fileHandler.updateFileList(new_files)
event.acceptProposedAction()
def handleItemSelectionChanged(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
self.viewMetadata(selectedItem)
#if 0 <= selectedIndex < len(self.selectedFiles):
#self.viewMetadata(selectedItem)
def updateImageView(self):
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
def updateVideoView(self):
self.imageView.resetTransform()
self.imageScene.setSceneRect(self.video_item.boundingRect())
self.imageView.setScene(self.imageScene)
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
def saveSettings(self):
file_paths = self.fileHandler.getFileList()
self.settings.setValue("selectedFiles", file_paths)
self.settings.setValue("main_window_state", self.saveState())
self.settings.setValue("main_window_geometry", self.saveGeometry())
def loadSettings(self):
file_paths = self.settings.value("selectedFiles", [])
self.fileHandler.updateFileList(file_paths)
if self.settings.value("main_window_state"):
self.restoreState(self.settings.value("main_window_state"))
def closeEvent(self, event):
self.saveSettings()
event.accept()
def eventFilter(self, obj, event):
if obj == self:
if event.type() == QEvent.Type.Resize:
self.updateImageView()
if obj in (self.fileListWidget, self.imageViewWidget):
if event.type() == QEvent.Type.Move:
self.updateImageView()
return super(MainWindow, self).eventFilter(obj, event)
def showContextMenu(self, event):
menu = QMenu(self)
view_action = QAction("View", self)
view_action.triggered.connect(self.openImage)
openfolder_action = QAction("Open folder", self)
openfolder_action.triggered.connect(self.openFolder)
remove_action = QAction("Remove", self)
remove_action.triggered.connect(self.fileHandler.removeSelectedItem)
menu.addAction(view_action)
menu.addAction(openfolder_action)
menu.addAction(remove_action)
menu.exec(self.fileList.mapToGlobal(event))
def openFolder(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
selectedFile = selectedItem.text()
folder_path = Path(selectedFile).parent
QDesktopServices.openUrl(QUrl.fromLocalFile(str(folder_path)))
def openImage(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
selectedFile = selectedItem.text()
subprocess.run(['start', '', selectedFile], shell=True)
def showAboutDialog(self):
self.setEnabled(False) | about_dialog = AboutDialog(self) | 9 | 2023-11-15 19:51:29+00:00 | 12k |
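Editor's note (illustrative, not part of the dataset rows): in both rows above, the integer that follows the ground-truth next line indexes the context snippet that the next line actually uses: 1 selects enumerate_spans in the TextEE/DyGIEpp row, and 9 selects AboutDialog in the SDIMV row. A minimal, hedged sketch of using that index on a parsed row; the dict keys are assumptions for illustration only.

# Hypothetical sketch only: assumes the row was parsed into a dict whose "context"
# entry is the list of {"identifier", "path", "snippet"} objects shown above.
def gold_context_snippet(row: dict) -> str:
    # The gold index selects the context entry whose identifier appears in the next line.
    return row["context"][row["gold_snippet_index"]]["snippet"]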
chaiNNer-org/spandrel | src/spandrel/architectures/GRLIR/arch/grl.py | [
{
"identifier": "to_2tuple",
"path": "src/spandrel/architectures/__arch_helpers/timm/helpers.py",
"snippet": "def to_2tuple(x: T | Iterable[T]) -> tuple[T, T]:\n if isinstance(x, str):\n return x, x # type: ignore\n if isinstance(x, collections.abc.Iterable):\n return tuple(x) # type: ignore\n return x, x"
},
{
"identifier": "trunc_normal_",
"path": "src/spandrel/architectures/__arch_helpers/timm/weight_init.py",
"snippet": "def trunc_normal_(\n tensor: torch.Tensor, mean=0.0, std=1.0, a=-2.0, b=2.0\n) -> torch.Tensor:\n r\"\"\"\n Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n\n NOTE: this impl is similar to the PyTorch `trunc_normal_`, the bounds [a, b] are\n applied while sampling the normal with mean/std applied, therefore a, b args\n should be adjusted to match the range of mean, std args.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n a: the minimum cutoff value\n b: the maximum cutoff value\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n \"\"\"\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)"
},
{
"identifier": "GRLConfig",
"path": "src/spandrel/architectures/GRLIR/arch/config.py",
"snippet": "class GRLConfig:\n out_proj_type: Literal[\"linear\", \"conv2d\"] = \"linear\"\n \"\"\"\n Type of the output projection in the self-attention modules.\n \"\"\"\n local_connection: bool = False\n \"\"\"\n Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used.\n \"\"\"\n euclidean_dist: bool = False\n \"\"\"\n use Euclidean distance or inner product as the similarity metric. An ablation study.\n \"\"\"\n double_window: bool = False\n stripe_square: bool = False\n separable_conv_act: bool = False\n use_buffer: bool = False\n \"\"\"\n Whether to use buffer.\n False: the attention masks, tables, and indices are pre-computed. Huge GPU memory consumption when the window size is large.\n True:\n use_efficient_buffer=False: buffers are not shared. computed for each layer during forward pass. Slow forward pass.\n use_efficient_buffer=True: pre-computed and shared buffers. Small GPU memory consumption, fast forward pass. Need to allocate buffers manually.\n \"\"\"\n use_efficient_buffer: bool = False\n \"\"\"\n Whether to use efficient buffer.\n \"\"\""
},
{
"identifier": "EfficientMixAttnTransformerBlock",
"path": "src/spandrel/architectures/GRLIR/arch/mixed_attn_block_efficient.py",
"snippet": "class EfficientMixAttnTransformerBlock(nn.Module):\n r\"\"\"Mix attention transformer block with shared QKV projection and output projection for mixed attention modules.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resulotion.\n num_heads (int): Number of attention heads.\n window_size (int): Window size.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float, optional): Stochastic depth rate. Default: 0.0\n act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n pretrained_stripe_size (int): Window size in pre-training.\n attn_type (str, optional): Attention type. Default: cwhv.\n c: residual blocks\n w: window attention\n h: horizontal stripe attention\n v: vertical stripe attention\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n input_resolution: tuple[int, int],\n num_heads_w: int,\n num_heads_s: int,\n window_size: tuple[int, int] | int = 7,\n window_shift=False,\n stripe_size=[8, 8],\n stripe_groups=[None, None],\n stripe_shift=False,\n stripe_type=\"H\",\n mlp_ratio=4.0,\n qkv_bias=True,\n qkv_proj_type=\"linear\",\n anchor_proj_type=\"separable_conv\",\n anchor_one_stage=True,\n anchor_window_down_factor=1,\n drop=0.0,\n attn_drop=0.0,\n drop_path=0.0,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm,\n pretrained_window_size=[0, 0],\n pretrained_stripe_size=[0, 0],\n res_scale=1.0,\n args: GRLConfig = None, # type: ignore\n ):\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.num_heads_w = num_heads_w\n self.num_heads_s = num_heads_s\n self.window_size = window_size\n self.window_shift = window_shift\n self.stripe_shift = stripe_shift\n self.stripe_type = stripe_type\n self.args = args\n if self.stripe_type == \"W\":\n self.stripe_size = stripe_size[::-1]\n self.stripe_groups = stripe_groups[::-1]\n else:\n self.stripe_size = stripe_size\n self.stripe_groups = stripe_groups\n self.mlp_ratio = mlp_ratio\n self.res_scale = res_scale\n\n self.attn = MixedAttention(\n dim,\n input_resolution,\n num_heads_w,\n num_heads_s,\n window_size,\n window_shift,\n self.stripe_size,\n self.stripe_groups,\n stripe_shift,\n qkv_bias,\n qkv_proj_type,\n anchor_proj_type,\n anchor_one_stage,\n anchor_window_down_factor,\n attn_drop,\n drop,\n pretrained_window_size,\n pretrained_stripe_size,\n args,\n )\n self.norm1 = norm_layer(dim)\n if self.args.local_connection:\n self.conv = CAB(dim)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n\n self.mlp = Mlp(\n in_features=dim,\n hidden_features=int(dim * mlp_ratio),\n act_layer=act_layer,\n drop=drop,\n )\n self.norm2 = norm_layer(dim)\n\n def _get_table_index_mask(self, all_table_index_mask):\n table_index_mask = {\n \"table_w\": all_table_index_mask[\"table_w\"],\n \"index_w\": all_table_index_mask[\"index_w\"],\n }\n if self.stripe_type == \"W\":\n table_index_mask[\"table_s\"] = all_table_index_mask[\"table_sv\"]\n table_index_mask[\"index_a2w\"] = all_table_index_mask[\"index_sv_a2w\"]\n table_index_mask[\"index_w2a\"] = all_table_index_mask[\"index_sv_w2a\"]\n else:\n table_index_mask[\"table_s\"] = all_table_index_mask[\"table_sh\"]\n table_index_mask[\"index_a2w\"] = 
all_table_index_mask[\"index_sh_a2w\"]\n table_index_mask[\"index_w2a\"] = all_table_index_mask[\"index_sh_w2a\"]\n if self.window_shift:\n table_index_mask[\"mask_w\"] = all_table_index_mask[\"mask_w\"]\n else:\n table_index_mask[\"mask_w\"] = None\n if self.stripe_shift:\n if self.stripe_type == \"W\":\n table_index_mask[\"mask_a2w\"] = all_table_index_mask[\"mask_sv_a2w\"]\n table_index_mask[\"mask_w2a\"] = all_table_index_mask[\"mask_sv_w2a\"]\n else:\n table_index_mask[\"mask_a2w\"] = all_table_index_mask[\"mask_sh_a2w\"]\n table_index_mask[\"mask_w2a\"] = all_table_index_mask[\"mask_sh_w2a\"]\n else:\n table_index_mask[\"mask_a2w\"] = None\n table_index_mask[\"mask_w2a\"] = None\n return table_index_mask\n\n def forward(self, x, x_size, all_table_index_mask):\n # Mixed attention\n table_index_mask = self._get_table_index_mask(all_table_index_mask)\n if self.args.local_connection:\n x = (\n x\n + self.res_scale\n * self.drop_path(self.norm1(self.attn(x, x_size, table_index_mask)))\n + self.conv(x, x_size)\n )\n else:\n x = x + self.res_scale * self.drop_path(\n self.norm1(self.attn(x, x_size, table_index_mask))\n )\n # FFN\n x = x + self.res_scale * self.drop_path(self.norm2(self.mlp(x)))\n\n return x\n\n def extra_repr(self) -> str:\n return (\n f\"dim={self.dim}, input_resolution={self.input_resolution}, num_heads=({self.num_heads_w}, {self.num_heads_s}), \"\n f\"window_size={self.window_size}, window_shift={self.window_shift}, \"\n f\"stripe_size={self.stripe_size}, stripe_groups={self.stripe_groups}, stripe_shift={self.stripe_shift}, self.stripe_type={self.stripe_type}, \"\n f\"mlp_ratio={self.mlp_ratio}, res_scale={self.res_scale}\"\n )\n\n def flops(self):\n pass"
},
{
"identifier": "get_stripe_info",
"path": "src/spandrel/architectures/GRLIR/arch/mixed_attn_block_efficient.py",
"snippet": "def get_stripe_info(\n stripe_size_in: list[int],\n stripe_groups_in: list[int | None],\n stripe_shift: bool,\n input_resolution: list[int] | tuple[int, int],\n):\n stripe_size: list[int] = []\n shift_size: list[int] = []\n for s, g, d in zip(stripe_size_in, stripe_groups_in, input_resolution):\n if g is None:\n stripe_size.append(s)\n shift_size.append(s // 2 if stripe_shift else 0)\n else:\n stripe_size.append(d // g)\n shift_size.append(0 if g == 1 else d // (g * 2))\n return stripe_size, shift_size"
},
{
"identifier": "bchw_to_blc",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def bchw_to_blc(x: torch.Tensor) -> torch.Tensor:\n \"\"\"Rearrange a tensor from the shape (B, C, H, W) to (B, L, C).\"\"\"\n return x.flatten(2).transpose(1, 2)"
},
{
"identifier": "blc_to_bchw",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def blc_to_bchw(x: torch.Tensor, x_size: tuple[int, int]) -> torch.Tensor:\n \"\"\"Rearrange a tensor from the shape (B, L, C) to (B, C, H, W).\"\"\"\n B, _L, C = x.shape\n return x.transpose(1, 2).view(B, C, *x_size)"
},
{
"identifier": "calculate_mask",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def calculate_mask(input_resolution, window_size, shift_size):\n \"\"\"\n Use case: 1)\n \"\"\"\n # calculate attention mask for SW-MSA\n if isinstance(shift_size, int):\n shift_size = to_2tuple(shift_size)\n mask_windows = _fill_window(input_resolution, window_size, shift_size)\n\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(\n attn_mask == 0, 0.0\n ) # nW, window_size**2, window_size**2\n\n return attn_mask"
},
{
"identifier": "calculate_mask_all",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def calculate_mask_all(\n input_resolution,\n window_size,\n shift_size,\n anchor_window_down_factor=1,\n window_to_anchor=True,\n):\n \"\"\"\n Use case: 3)\n \"\"\"\n # calculate attention mask for SW-MSA\n anchor_resolution = [s // anchor_window_down_factor for s in input_resolution]\n aws = [s // anchor_window_down_factor for s in window_size]\n anchor_shift = [s // anchor_window_down_factor for s in shift_size]\n\n # mask of window1: nW, Wh**Ww\n mask_windows = _fill_window(input_resolution, window_size, shift_size)\n # mask of window2: nW, AWh*AWw\n mask_anchor = _fill_window(anchor_resolution, aws, anchor_shift)\n\n if window_to_anchor:\n attn_mask = mask_windows.unsqueeze(2) - mask_anchor.unsqueeze(1)\n else:\n attn_mask = mask_anchor.unsqueeze(2) - mask_windows.unsqueeze(1)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(\n attn_mask == 0, 0.0\n ) # nW, Wh**Ww, AWh*AWw\n\n return attn_mask"
},
{
"identifier": "get_relative_coords_table_all",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def get_relative_coords_table_all(\n window_size: tuple[int, int] | list[int],\n pretrained_window_size: tuple[int, int] | list[int] = [0, 0],\n anchor_window_down_factor=1,\n):\n \"\"\"\n Use case: 3)\n\n Support all window shapes.\n Args:\n window_size:\n pretrained_window_size:\n anchor_window_down_factor:\n\n Returns:\n\n \"\"\"\n # get relative_coords_table\n ws = window_size\n aws = [w // anchor_window_down_factor for w in window_size]\n pws = pretrained_window_size\n paws = [w // anchor_window_down_factor for w in pretrained_window_size]\n\n # positive table size: (Ww - 1) - (Ww - AWw) // 2\n ts_p = [w1 - 1 - (w1 - w2) // 2 for w1, w2 in zip(ws, aws)]\n # negative table size: -(AWw - 1) - (Ww - AWw) // 2\n ts_n = [-(w2 - 1) - (w1 - w2) // 2 for w1, w2 in zip(ws, aws)]\n pts = [w1 - 1 - (w1 - w2) // 2 for w1, w2 in zip(pws, paws)]\n\n # TODO: pretrained window size and pretrained anchor window size is only used here.\n # TODO: Investigate whether it is really important to use this setting when finetuning large window size\n # TODO: based on pretrained weights with small window size.\n\n coord_h = torch.arange(ts_n[0], ts_p[0] + 1, dtype=torch.float32)\n coord_w = torch.arange(ts_n[1], ts_p[1] + 1, dtype=torch.float32)\n table = torch.stack(torch.meshgrid([coord_h, coord_w], indexing=\"ij\")).permute(\n 1, 2, 0\n )\n table = table.contiguous().unsqueeze(0) # 1, Wh+AWh-1, Ww+AWw-1, 2\n if pts[0] > 0:\n table[:, :, :, 0] /= pts[0]\n table[:, :, :, 1] /= pts[1]\n else:\n table[:, :, :, 0] /= ts_p[0]\n table[:, :, :, 1] /= ts_p[1]\n table *= 8 # normalize to -8, 8\n table = torch.sign(table) * torch.log2(torch.abs(table) + 1.0) / np.log2(8)\n # 1, Wh+AWh-1, Ww+AWw-1, 2\n return table"
},
{
"identifier": "get_relative_position_index_simple",
"path": "src/spandrel/architectures/GRLIR/arch/ops.py",
"snippet": "def get_relative_position_index_simple(\n window_size: tuple[int, int] | list[int],\n anchor_window_down_factor=1,\n window_to_anchor=True,\n):\n \"\"\"\n Use case: 3)\n This is a simplified version of get_relative_position_index_all\n The start coordinate of anchor window is also (0, 0)\n get pair-wise relative position index for each token inside the window\n \"\"\"\n ws = window_size\n aws = [w // anchor_window_down_factor for w in window_size]\n\n coords = _get_meshgrid_coords((0, 0), window_size) # 2, Wh*Ww\n coords_anchor = _get_meshgrid_coords((0, 0), aws)\n # 2, AWh*AWw\n\n max_horizontal_diff = aws[1] + ws[1] - 1\n if window_to_anchor:\n offset = [w2 - 1 for w2 in aws]\n idx = coords_diff_odd(coords, coords_anchor, offset, max_horizontal_diff)\n else:\n offset = [w1 - 1 for w1 in ws]\n idx = coords_diff_odd(coords_anchor, coords, offset, max_horizontal_diff)\n return idx # Wh*Ww, AWh*AWw or AWh*AWw, Wh*Ww"
},
{
"identifier": "build_last_conv",
"path": "src/spandrel/architectures/GRLIR/arch/swin_v1_block.py",
"snippet": "def build_last_conv(conv_type: str, dim: int):\n if conv_type == \"1conv\":\n block = nn.Conv2d(dim, dim, 3, 1, 1)\n elif conv_type == \"3conv\":\n # to save parameters and memory\n block = nn.Sequential(\n nn.Conv2d(dim, dim // 4, 3, 1, 1),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Conv2d(dim // 4, dim, 3, 1, 1),\n )\n elif conv_type == \"1conv1x1\":\n block = nn.Conv2d(dim, dim, 1, 1, 0)\n elif conv_type == \"linear\":\n block = Linear(dim, dim)\n else:\n raise ValueError(f\"Unsupported conv_type {conv_type}\")\n return block"
},
{
"identifier": "Upsample",
"path": "src/spandrel/architectures/GRLIR/arch/upsample.py",
"snippet": "class Upsample(nn.Module):\n \"\"\"Upsample module.\n Args:\n scale (int): Scale factor. Supported scales: 2^n and 3.\n num_feat (int): Channel number of intermediate features.\n \"\"\"\n\n def __init__(self, scale, num_feat):\n super().__init__()\n m = []\n if (scale & (scale - 1)) == 0: # scale = 2^n\n for _ in range(int(math.log(scale, 2))):\n m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))\n m.append(nn.PixelShuffle(2))\n elif scale == 3:\n m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))\n m.append(nn.PixelShuffle(3))\n else:\n raise ValueError(\n f\"scale {scale} is not supported. \" \"Supported scales: 2^n and 3.\"\n )\n self.up = nn.Sequential(*m)\n\n def forward(self, x):\n return self.up(x)"
},
{
"identifier": "UpsampleOneStep",
"path": "src/spandrel/architectures/GRLIR/arch/upsample.py",
"snippet": "class UpsampleOneStep(nn.Module):\n \"\"\"UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)\n Used in lightweight SR to save parameters.\n Args:\n scale (int): Scale factor. Supported scales: 2^n and 3.\n num_feat (int): Channel number of intermediate features.\n \"\"\"\n\n def __init__(self, scale, num_feat, num_out_ch):\n super().__init__()\n self.num_feat = num_feat\n m = []\n m.append(nn.Conv2d(num_feat, (scale**2) * num_out_ch, 3, 1, 1))\n m.append(nn.PixelShuffle(scale))\n self.up = nn.Sequential(*m)\n\n def forward(self, x):\n return self.up(x)"
}
] | from typing import Literal
from ...__arch_helpers.timm.helpers import to_2tuple
from ...__arch_helpers.timm.weight_init import trunc_normal_
from .config import GRLConfig
from .mixed_attn_block_efficient import (
EfficientMixAttnTransformerBlock,
get_stripe_info,
)
from .ops import (
bchw_to_blc,
blc_to_bchw,
calculate_mask,
calculate_mask_all,
get_relative_coords_table_all,
get_relative_position_index_simple,
)
from .swin_v1_block import (
build_last_conv,
)
from .upsample import Upsample, UpsampleOneStep
from fairscale.nn import checkpoint_wrapper # type: ignore
import torch
import torch.nn as nn
import torch.nn.functional as F | 8,473 | mlp_ratio=4.0,
qkv_bias=True,
qkv_proj_type="linear",
anchor_proj_type="avgpool",
anchor_one_stage=True,
anchor_window_down_factor=1,
out_proj_type: Literal["linear", "conv2d"] = "linear",
local_connection=False,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
pretrained_window_size: list[int] = [0, 0],
pretrained_stripe_size: list[int] = [0, 0],
conv_type="1conv",
init_method="n", # initialization method of the weight parameters used to train large scale models.
fairscale_checkpoint=False, # fairscale activation checkpointing
offload_to_cpu=False,
euclidean_dist=False,
):
super().__init__()
# Process the input arguments
out_channels = out_channels or in_channels
self.in_channels = in_channels
self.out_channels = out_channels
num_out_feats = 64
self.embed_dim = embed_dim
self.upscale = upscale
self.upsampler = upsampler
self.img_range = img_range
if in_channels == 3:
rgb_mean = (0.4488, 0.4371, 0.4040)
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
else:
self.mean = torch.zeros(1, 1, 1, 1)
max_stripe_size = max([0 if s is None else s for s in stripe_size]) # type: ignore
max_stripe_groups = max([0 if s is None else s for s in stripe_groups])
max_stripe_groups *= anchor_window_down_factor
self.pad_size = max(window_size, max_stripe_size, max_stripe_groups)
# if max_stripe_size >= window_size:
# self.pad_size *= anchor_window_down_factor
# if stripe_groups[0] is None and stripe_groups[1] is None:
# self.pad_size = max(stripe_size)
# else:
# self.pad_size = window_size
self.input_resolution = to_2tuple(img_size)
self.window_size = to_2tuple(window_size)
self.shift_size = [w // 2 for w in self.window_size]
self.stripe_size = stripe_size
self.stripe_groups = stripe_groups
self.pretrained_window_size = pretrained_window_size
self.pretrained_stripe_size = pretrained_stripe_size
self.anchor_window_down_factor = anchor_window_down_factor
# Head of the network. First convolution.
self.conv_first = nn.Conv2d(in_channels, embed_dim, 3, 1, 1)
# Body of the network
self.norm_start = norm_layer(embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# stochastic depth decay rule
args = GRLConfig(
out_proj_type=out_proj_type,
local_connection=local_connection,
euclidean_dist=euclidean_dist,
)
for k, v in self.set_table_index_mask(self.input_resolution).items():
self.register_buffer(k, v, persistent=False)
self.layers = nn.ModuleList()
for i in range(len(depths)):
layer = TransformerStage(
dim=embed_dim,
input_resolution=self.input_resolution,
depth=depths[i],
num_heads_window=num_heads_window[i],
num_heads_stripe=num_heads_stripe[i],
window_size=self.window_size,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_shift=stripe_shift,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(depths[:i]) : sum(
depths[: i + 1]
) # type: ignore
], # no impact on SR results
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
conv_type=conv_type,
init_method=init_method,
fairscale_checkpoint=fairscale_checkpoint,
offload_to_cpu=offload_to_cpu,
args=args,
)
self.layers.append(layer)
self.norm_end = norm_layer(embed_dim)
# Tail of the network
self.conv_after_body = build_last_conv(conv_type, embed_dim)
#####################################################################################################
################################ 3, high quality image reconstruction ################################
if self.upsampler == "pixelshuffle":
# for classical SR
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
| """
Efficient and Explicit Modelling of Image Hierarchies for Image Restoration
Image restoration transformers with global, regional, and local modelling
A clean version of the.
Shared buffers are used for relative_coords_table, relative_position_index, and attn_mask.
"""
from __future__ import annotations
class TransformerStage(nn.Module):
"""Transformer stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads_window (list[int]): Number of window attention heads in different layers.
num_heads_stripe (list[int]): Number of stripe attention heads in different layers.
stripe_size (list[int]): Stripe size. Default: [8, 8]
stripe_groups (list[int]): Number of stripe groups. Default: [None, None].
stripe_shift (bool): whether to shift the stripes. This is used as an ablation study.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qkv_proj_type (str): QKV projection type. Default: linear. Choices: linear, separable_conv.
anchor_proj_type (str): Anchor projection type. Default: avgpool. Choices: avgpool, maxpool, conv2d, separable_conv, patchmerging.
anchor_one_stage (bool): Whether to use one operator or multiple progressive operators to reduce feature map resolution. Default: True.
anchor_window_down_factor (int): The downscale factor used to get the anchors.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
pretrained_window_size (list[int]): pretrained window size. This is actually not used. Default: [0, 0].
pretrained_stripe_size (list[int]): pretrained stripe size. This is actually not used. Default: [0, 0].
conv_type: The convolutional block before residual connection.
init_method: initialization method of the weight parameters used to train large scale models.
Choices: n, normal -- Swin V1 init method.
l, layernorm -- Swin V2 init method. Zero the weight and bias in the post layer normalization layer.
r, res_rescale -- EDSR rescale method. Rescale the residual blocks with a scaling factor 0.1
w, weight_rescale -- MSRResNet rescale method. Rescale the weight parameter in residual blocks with a scaling factor 0.1
t, trunc_normal_ -- nn.Linear, trunc_normal; nn.Conv2d, weight_rescale
fairscale_checkpoint (bool): Whether to use fairscale checkpoint.
offload_to_cpu (bool): used by fairscale_checkpoint
args:
out_proj_type (str): Type of the output projection in the self-attention modules. Default: linear. Choices: linear, conv2d.
local_connection (bool): Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used.
euclidean_dist (bool): use Euclidean distance or inner product as the similarity metric. An ablation study.
"""
def __init__(
self,
dim: int,
input_resolution: tuple[int, int],
depth: int,
num_heads_window: int,
num_heads_stripe: int,
window_size: tuple[int, int],
stripe_size,
stripe_groups,
stripe_shift,
mlp_ratio=4.0,
qkv_bias=True,
qkv_proj_type="linear",
anchor_proj_type="avgpool",
anchor_one_stage=True,
anchor_window_down_factor=1,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
norm_layer=nn.LayerNorm,
pretrained_window_size=[0, 0],
pretrained_stripe_size=[0, 0],
conv_type="1conv",
init_method="",
fairscale_checkpoint=False,
offload_to_cpu=False,
args: GRLConfig = None, # type: ignore
):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.init_method = init_method
self.blocks = nn.ModuleList()
for i in range(depth):
block = EfficientMixAttnTransformerBlock(
dim=dim,
input_resolution=input_resolution,
num_heads_w=num_heads_window,
num_heads_s=num_heads_stripe,
window_size=window_size,
window_shift=i % 2 == 0,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_type="H" if i % 2 == 0 else "W",
stripe_shift=i % 4 in [2, 3] if stripe_shift else False,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
res_scale=0.1 if init_method == "r" else 1.0,
args=args,
)
# print(fairscale_checkpoint, offload_to_cpu)
if fairscale_checkpoint:
block = checkpoint_wrapper(block, offload_to_cpu=offload_to_cpu)
self.blocks.append(block)
self.conv = build_last_conv(conv_type, dim)
def _init_weights(self):
for n, m in self.named_modules():
if self.init_method == "w":
if isinstance(m, (nn.Linear, nn.Conv2d)) and n.find("cpb_mlp") < 0:
print("nn.Linear and nn.Conv2d weight initilization")
m.weight.data *= 0.1
elif self.init_method == "l":
if isinstance(m, nn.LayerNorm):
print("nn.LayerNorm initialization")
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 0)
elif self.init_method.find("t") >= 0:
scale = 0.1 ** (len(self.init_method) - 1) * int(self.init_method[-1])
if isinstance(m, nn.Linear) and n.find("cpb_mlp") < 0:
trunc_normal_(m.weight, std=scale)
elif isinstance(m, nn.Conv2d):
m.weight.data *= 0.1
print(
"Initialization nn.Linear - trunc_normal; nn.Conv2d - weight rescale."
)
else:
raise NotImplementedError(
f"Parameter initialization method {self.init_method} not implemented in TransformerStage."
)
def forward(self, x, x_size, table_index_mask):
res = x
for blk in self.blocks:
res = blk(res, x_size, table_index_mask)
res = bchw_to_blc(self.conv(blc_to_bchw(res, x_size)))
return res + x
def flops(self):
pass
class GRL(nn.Module):
r"""Image restoration transformer with global, non-local, and local connections
Args:
img_size (int | list[int]): Input image size. Default 64
in_channels (int): Number of input image channels. Default: 3
out_channels (int): Number of output image channels. Default: None
embed_dim (int): Patch embedding dimension. Default: 96
upscale (int): Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compression artifact reduction
img_range (float): Image range. 1. or 255.
upsampler (str): The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
depths (list[int]): Depth of each Swin Transformer layer.
num_heads_window (list[int]): Number of window attention heads in different layers.
num_heads_stripe (list[int]): Number of stripe attention heads in different layers.
window_size (int): Window size. Default: 8.
stripe_size (list[int]): Stripe size. Default: [8, 8]
stripe_groups (list[int]): Number of stripe groups. Default: [None, None].
stripe_shift (bool): whether to shift the stripes. This is used as an ablation study.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qkv_proj_type (str): QKV projection type. Default: linear. Choices: linear, separable_conv.
anchor_proj_type (str): Anchor projection type. Default: avgpool. Choices: avgpool, maxpool, conv2d, separable_conv, patchmerging.
anchor_one_stage (bool): Whether to use one operator or multiple progressive operators to reduce feature map resolution. Default: True.
anchor_window_down_factor (int): The downscale factor used to get the anchors.
out_proj_type (str): Type of the output projection in the self-attention modules. Default: linear. Choices: linear, conv2d.
local_connection (bool): Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used.
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
pretrained_window_size (list[int]): pretrained window size. This is actually not used. Default: [0, 0].
pretrained_stripe_size (list[int]): pretrained stripe size. This is actually not used. Default: [0, 0].
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
conv_type (str): The convolutional block before residual connection. Default: 1conv. Choices: 1conv, 3conv, 1conv1x1, linear
init_method: initialization method of the weight parameters used to train large scale models.
Choices: n, normal -- Swin V1 init method.
l, layernorm -- Swin V2 init method. Zero the weight and bias in the post layer normalization layer.
r, res_rescale -- EDSR rescale method. Rescale the residual blocks with a scaling factor 0.1
w, weight_rescale -- MSRResNet rescale method. Rescale the weight parameter in residual blocks with a scaling factor 0.1
t, trunc_normal_ -- nn.Linear, trunc_normal; nn.Conv2d, weight_rescale
fairscale_checkpoint (bool): Whether to use fairscale checkpoint.
offload_to_cpu (bool): used by fairscale_checkpoint
euclidean_dist (bool): use Euclidean distance or inner product as the similarity metric. An ablation study.
"""
def __init__(
self,
img_size=64,
in_channels: int = 3,
out_channels: int | None = None,
embed_dim=96,
upscale=2,
img_range=1.0,
upsampler="",
depths: list[int] = [6, 6, 6, 6, 6, 6],
num_heads_window: list[int] = [3, 3, 3, 3, 3, 3],
num_heads_stripe: list[int] = [3, 3, 3, 3, 3, 3],
window_size=8,
stripe_size: list[int] = [8, 8], # used for stripe window attention
stripe_groups: list[int | None] = [None, None],
stripe_shift=False,
mlp_ratio=4.0,
qkv_bias=True,
qkv_proj_type="linear",
anchor_proj_type="avgpool",
anchor_one_stage=True,
anchor_window_down_factor=1,
out_proj_type: Literal["linear", "conv2d"] = "linear",
local_connection=False,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
pretrained_window_size: list[int] = [0, 0],
pretrained_stripe_size: list[int] = [0, 0],
conv_type="1conv",
init_method="n", # initialization method of the weight parameters used to train large scale models.
fairscale_checkpoint=False, # fairscale activation checkpointing
offload_to_cpu=False,
euclidean_dist=False,
):
super().__init__()
# Process the input arguments
out_channels = out_channels or in_channels
self.in_channels = in_channels
self.out_channels = out_channels
num_out_feats = 64
self.embed_dim = embed_dim
self.upscale = upscale
self.upsampler = upsampler
self.img_range = img_range
if in_channels == 3:
rgb_mean = (0.4488, 0.4371, 0.4040)
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
else:
self.mean = torch.zeros(1, 1, 1, 1)
max_stripe_size = max([0 if s is None else s for s in stripe_size]) # type: ignore
max_stripe_groups = max([0 if s is None else s for s in stripe_groups])
max_stripe_groups *= anchor_window_down_factor
self.pad_size = max(window_size, max_stripe_size, max_stripe_groups)
# if max_stripe_size >= window_size:
# self.pad_size *= anchor_window_down_factor
# if stripe_groups[0] is None and stripe_groups[1] is None:
# self.pad_size = max(stripe_size)
# else:
# self.pad_size = window_size
self.input_resolution = to_2tuple(img_size)
self.window_size = to_2tuple(window_size)
self.shift_size = [w // 2 for w in self.window_size]
self.stripe_size = stripe_size
self.stripe_groups = stripe_groups
self.pretrained_window_size = pretrained_window_size
self.pretrained_stripe_size = pretrained_stripe_size
self.anchor_window_down_factor = anchor_window_down_factor
# Head of the network. First convolution.
self.conv_first = nn.Conv2d(in_channels, embed_dim, 3, 1, 1)
# Body of the network
self.norm_start = norm_layer(embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# stochastic depth decay rule
args = GRLConfig(
out_proj_type=out_proj_type,
local_connection=local_connection,
euclidean_dist=euclidean_dist,
)
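# the relative position tables, indices and attention masks are computed once here and shared by all blocks as non-persistent buffers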
for k, v in self.set_table_index_mask(self.input_resolution).items():
self.register_buffer(k, v, persistent=False)
self.layers = nn.ModuleList()
for i in range(len(depths)):
layer = TransformerStage(
dim=embed_dim,
input_resolution=self.input_resolution,
depth=depths[i],
num_heads_window=num_heads_window[i],
num_heads_stripe=num_heads_stripe[i],
window_size=self.window_size,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_shift=stripe_shift,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(depths[:i]) : sum(
depths[: i + 1]
) # type: ignore
], # no impact on SR results
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
conv_type=conv_type,
init_method=init_method,
fairscale_checkpoint=fairscale_checkpoint,
offload_to_cpu=offload_to_cpu,
args=args,
)
self.layers.append(layer)
self.norm_end = norm_layer(embed_dim)
# Tail of the network
self.conv_after_body = build_last_conv(conv_type, embed_dim)
#####################################################################################################
################################ 3, high quality image reconstruction ################################
if self.upsampler == "pixelshuffle":
# for classical SR
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
) | self.upsample = Upsample(upscale, num_out_feats) | 12 | 2023-11-17 01:11:47+00:00 | 12k |
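The GRLIR example above leans on a few small layout helpers whose full bodies appear in its context snippets: bchw_to_blc and blc_to_bchw move between (B, C, H, W) and (B, L, C) token layouts, and get_stripe_info resolves stripe sizes and shift sizes from either a fixed stripe size or a number of stripe groups. A minimal sketch of how they behave, assuming only PyTorch is installed; the helper bodies are copied from those snippets purely for illustration:

import torch

def bchw_to_blc(x):
    # flatten the spatial dimensions into a token axis: (B, C, H, W) -> (B, H*W, C)
    return x.flatten(2).transpose(1, 2)

def blc_to_bchw(x, x_size):
    # inverse of bchw_to_blc given the original (H, W)
    B, _L, C = x.shape
    return x.transpose(1, 2).view(B, C, *x_size)

def get_stripe_info(stripe_size_in, stripe_groups_in, stripe_shift, input_resolution):
    # group is None -> use the fixed stripe size (shift is half of it when shifting);
    # group is an int -> derive the stripe size from the resolution divided by the group count
    stripe_size, shift_size = [], []
    for s, g, d in zip(stripe_size_in, stripe_groups_in, input_resolution):
        if g is None:
            stripe_size.append(s)
            shift_size.append(s // 2 if stripe_shift else 0)
        else:
            stripe_size.append(d // g)
            shift_size.append(0 if g == 1 else d // (g * 2))
    return stripe_size, shift_size

x = torch.randn(2, 96, 16, 16)                              # (B, C, H, W)
tokens = bchw_to_blc(x)                                     # torch.Size([2, 256, 96])
assert blc_to_bchw(tokens, (16, 16)).shape == x.shape       # round trip preserves the shape

print(get_stripe_info([8, 8], [None, None], True, (16, 16)))   # ([8, 8], [4, 4])
print(get_stripe_info([8, 8], [4, None], True, (16, 16)))      # ([4, 8], [2, 4])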
motexture/VSeq2VSeq | models/unet.py | [
{
"identifier": "TransformerTemporalModel",
"path": "models/transformers.py",
"snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n norm_elementwise_affine: bool = True,\n double_self_attention: bool = True,\n ):\n super().__init__()\n\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n attention_bias=attention_bias,\n double_self_attention=double_self_attention,\n norm_elementwise_affine=norm_elementwise_affine,\n )\n for d in range(num_layers)\n ]\n )\n\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n def forward(\n self,\n hidden_states,\n num_frames=1,\n encoder_hidden_states=None,\n timestep=None,\n class_labels=None,\n cross_attention_kwargs=None,\n return_dict: bool = True,\n ):\n # 1. Input\n batch_frames, channel, height, width = hidden_states.shape\n batch_size = batch_frames // num_frames\n\n residual = hidden_states\n\n hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4)\n\n hidden_states = self.norm(hidden_states)\n hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)\n\n hidden_states = self.proj_in(hidden_states)\n\n # 2. Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. Output\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states[None, None, :]\n .reshape(batch_size, height, width, channel, num_frames)\n .permute(0, 3, 4, 1, 2)\n .contiguous()\n )\n hidden_states = hidden_states.reshape(batch_frames, channel, height, width)\n\n output = hidden_states + residual\n\n if not return_dict:\n return (output,)\n\n return TransformerTemporalModelOutput(sample=output)"
},
{
"identifier": "Conditioner",
"path": "models/resnet.py",
"snippet": "class Conditioner(nn.Module):\n def __init__(self, dim, dim_out, kernel_size, **kwargs):\n super().__init__()\n\n self.spatial_conv = nn.Conv2d(dim, dim_out, kernel_size, **kwargs)\n self.conditioning_conv = nn.Conv2d(dim, dim_out, kernel_size, **kwargs)\n \n def forward(self, hidden_states, conditioning_hidden_states): \n hidden_states = self.spatial_conv(hidden_states)\n conditioning_hidden_states = self.conditioning_conv(conditioning_hidden_states)\n\n return hidden_states, conditioning_hidden_states"
},
{
"identifier": "CrossAttnDownBlock3D",
"path": "models/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n\n resnets = []\n attentions = []\n temp_attentions = []\n temp_conditioning_attentions = []\n temp_convs = []\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=dropout\n )\n )\n attentions.append(\n Transformer2DModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n temp_conditioning_attentions.append(\n TransformerTemporalConditioningModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n norm_num_groups=resnet_groups,\n only_cross_attention=True\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n self.temp_conditioning_attentions = nn.ModuleList(temp_conditioning_attentions)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n def forward(\n self,\n hidden_states,\n conditioning_hidden_states,\n h_emb=None,\n c_emb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n output_states = ()\n conditioning_output_states = ()\n\n for resnet, temp_conv, attn, temp_attn, temp_cond_attn in zip_longest(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions, self.temp_conditioning_attentions\n ):\n \n if self.gradient_checkpointing:\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n \n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames)\n hidden_states = 
torch.utils.checkpoint.checkpoint(create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states,)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_attn, return_dict=False), hidden_states, num_frames)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_cond_attn, return_dict=False), hidden_states, conditioning_hidden_states, num_frames)[0]\n else:\n hidden_states, conditioning_hidden_states = resnet(hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs,).sample\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n hidden_states = temp_cond_attn(hidden_states, conditioning_hidden_states, num_frames=num_frames).sample\n\n output_states += (hidden_states,)\n conditioning_output_states += (conditioning_hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states, conditioning_hidden_states = downsampler(hidden_states, conditioning_hidden_states)\n\n output_states += (hidden_states,)\n conditioning_output_states += (conditioning_hidden_states,)\n\n return hidden_states, output_states, conditioning_hidden_states, conditioning_output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "models/unet_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n\n resnets = []\n temp_convs = []\n attentions = []\n temp_attentions = []\n temp_conditioning_attentions = []\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=dropout\n )\n )\n attentions.append(\n Transformer2DModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n temp_conditioning_attentions.append(\n TransformerTemporalConditioningModel(\n out_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n norm_num_groups=resnet_groups,\n only_cross_attention=True\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n self.temp_conditioning_attentions = nn.ModuleList(temp_conditioning_attentions)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n conditioning_hidden_states,\n res_conditioning_hidden_states_tuple,\n h_emb=None,\n c_emb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None,\n ):\n for resnet, temp_conv, attn, temp_attn, temp_cond_attn in zip_longest(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions, self.temp_conditioning_attentions\n ):\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n res_conditioning_hidden_states = res_conditioning_hidden_states_tuple[-1]\n res_conditioning_hidden_states_tuple = res_conditioning_hidden_states_tuple[:-1]\n conditioning_hidden_states = torch.cat([conditioning_hidden_states, res_conditioning_hidden_states], dim=1)\n\n if self.gradient_checkpointing:\n def create_custom_forward(module, 
return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n \n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states,)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_attn, return_dict=False), hidden_states, num_frames)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_cond_attn, return_dict=False), hidden_states, conditioning_hidden_states, num_frames)[0]\n else:\n hidden_states, conditioning_hidden_states = resnet(hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs,).sample\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n hidden_states = temp_cond_attn(hidden_states, conditioning_hidden_states, num_frames=num_frames).sample\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states, conditioning_hidden_states = upsampler(hidden_states, conditioning_hidden_states, upsample_size)\n\n return hidden_states, conditioning_hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "models/unet_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n ):\n super().__init__()\n\n resnets = []\n temp_convs = []\n\n self.gradient_checkpointing = False\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=dropout\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n def forward(self, hidden_states, conditioning_hidden_states, h_emb=None, c_emb=None, num_frames=1):\n output_states = ()\n conditioning_output_states = ()\n\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n if self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames)\n else:\n hidden_states, conditioning_hidden_states = resnet(hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames) \n\n output_states += (hidden_states,)\n conditioning_output_states += (conditioning_hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states, conditioning_hidden_states = downsampler(hidden_states, conditioning_hidden_states)\n\n output_states += (hidden_states,)\n conditioning_output_states += (conditioning_hidden_states,)\n\n return hidden_states, output_states, conditioning_hidden_states, conditioning_output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "models/unet_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n upcast_attention=False,\n ):\n super().__init__()\n\n self.gradient_checkpointing = False\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=dropout\n )\n ]\n attentions = []\n temp_attentions = []\n temp_conditioning_attentions = []\n\n for _ in range(num_layers):\n attentions.append(\n Transformer2DModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n temp_conditioning_attentions.append(\n TransformerTemporalConditioningModel(\n in_channels // attn_num_head_channels,\n attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n norm_num_groups=resnet_groups,\n only_cross_attention=True\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=dropout\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n self.temp_conditioning_attentions = nn.ModuleList(temp_conditioning_attentions)\n\n def forward(\n self,\n hidden_states,\n conditioning_hidden_states,\n h_emb=None,\n c_emb=None,\n encoder_hidden_states=None,\n attention_mask=None,\n num_frames=1,\n cross_attention_kwargs=None\n ):\n if self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n \n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.resnets[0]), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.temp_convs[0]), hidden_states, num_frames)\n else:\n hidden_states, conditioning_hidden_states = self.resnets[0](hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames)\n \n for attn, temp_attn, temp_cond_attn, resnet, temp_conv in zip_longest(\n self.attentions, self.temp_attentions, self.temp_conditioning_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n if 
self.gradient_checkpointing:\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n \n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states,)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_attn, return_dict=False), hidden_states, num_frames)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_cond_attn, return_dict=False), hidden_states, conditioning_hidden_states, num_frames)[0]\n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames)\n else:\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states,cross_attention_kwargs=cross_attention_kwargs,).sample\n hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample\n hidden_states = temp_cond_attn(hidden_states, conditioning_hidden_states, num_frames=num_frames).sample\n hidden_states, conditioning_hidden_states = resnet(hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames) \n\n return hidden_states, conditioning_hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "models/unet_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n ):\n super().__init__()\n\n resnets = []\n temp_convs = []\n self.gradient_checkpointing = False\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=dropout\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n def forward(\n self, \n hidden_states, \n res_hidden_states_tuple, \n conditioning_hidden_states, \n res_conditioning_hidden_states_tuple, \n h_emb=None,\n c_emb=None,\n upsample_size=None, \n num_frames=1\n ):\n \n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n res_conditioning_hidden_states = res_conditioning_hidden_states_tuple[-1]\n res_conditioning_hidden_states_tuple = res_conditioning_hidden_states_tuple[:-1]\n conditioning_hidden_states = torch.cat([conditioning_hidden_states, res_conditioning_hidden_states], dim=1)\n\n if self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states, conditioning_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames)\n else:\n hidden_states, conditioning_hidden_states = resnet(hidden_states, conditioning_hidden_states, h_emb, c_emb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states, conditioning_hidden_states = upsampler(hidden_states, conditioning_hidden_states, upsample_size)\n\n return hidden_states, conditioning_hidden_states"
},
{
"identifier": "get_down_block",
"path": "models/unet_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n only_cross_attention=False,\n upcast_attention=False\n):\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "models/unet_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n only_cross_attention=False,\n upcast_attention=False\n):\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_groups=resnet_groups\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
}
] | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from safetensors.torch import load_file
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import WEIGHTS_NAME
from .transformers import TransformerTemporalModel
from .resnet import Conditioner
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block
)
import torch
import torch.nn as nn
import torch.utils.checkpoint
import os | 9,319 | The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, it will skip the normalization and activation layers in post-processing
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1024,
attention_head_dim: Union[int, Tuple[int]] = 64,
):
super().__init__()
self.sample_size = sample_size
self.gradient_checkpointing = False
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = Conditioner(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.hidden_time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.conditioning_time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.transformer_in = TransformerTemporalModel(
num_attention_heads=8,
attention_head_dim=attention_head_dim,
in_channels=block_out_channels[0],
num_layers=1,
)
# class embedding
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding
)
self.down_blocks.append(down_block)
# mid
| # Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved.
# Copyright 2023 The ModelScope Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
r"""
    UNet3DConditionModel is a conditional 3D UNet model that takes in a noisy sample, conditional state, and a timestep
    and returns a sample-shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`):
The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, it will skip the normalization and activation layers in post-processing
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1024,
attention_head_dim: Union[int, Tuple[int]] = 64,
):
super().__init__()
self.sample_size = sample_size
self.gradient_checkpointing = False
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = Conditioner(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.hidden_time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.conditioning_time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.transformer_in = TransformerTemporalModel(
num_attention_heads=8,
attention_head_dim=attention_head_dim,
in_channels=block_out_channels[0],
num_layers=1,
)
# class embedding
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding
)
self.down_blocks.append(down_block)
# mid | self.mid_block = UNetMidBlock3DCrossAttn( | 5 | 2023-11-14 09:09:09+00:00 | 12k |
TCLResearchEurope/torch-dag | torch_dag_algorithms/pruning/orbitalize_model.py | [
{
"identifier": "DagModule",
"path": "torch_dag/core/dag_module.py",
"snippet": "class DagModule(torch.nn.Module):\n MAX_LEN_REPR = None\n\n def __init__(\n self,\n name: str,\n vertices: Optional[List[Vertex]] = None,\n output_vertex: Optional[InnerVertex] = None,\n ):\n super().__init__()\n self.name = name\n self.vertices = vertices if vertices is not None else []\n self.output_vertex = output_vertex\n self.forward_dict = None\n self.inputs_dict = None\n self.cache_forward_dict = False\n self._inner_modules = None\n self.forward_scaffold = {}\n self.output_index = None\n self.compiled = False\n self.update_inner_modules()\n\n def compile(self, inputs: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None):\n \"\"\"\n In general `forward` method of DagModule is not `torch.compile` friendly. To overcome that\n we need to use a modified implementation of the forward pass, with no cacheing of intermediate tensors.\n Additionally, some modules may require a compile-type step for `torch.compile` usage.\n :param inputs: optional input (a dummy tensor for a single forward pass)\n \"\"\"\n if inputs is not None:\n is_training = self.training\n if is_training:\n self.eval()\n _ = self(inputs)\n if is_training:\n self.train()\n\n self.forward_scaffold, self.output_index = self.get_forward_scaffold()\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n v.module.compile()\n self.compiled = True\n\n @property\n def inner_modules(self) -> torch.nn.ModuleList:\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n return self._inner_modules\n\n @property\n def input_vertices(self) -> List[InputVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InputVertex)]\n\n @property\n def inner_vertices(self) -> List[InnerVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InnerVertex)]\n\n def update_inner_modules(self):\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n for iv in self.inner_vertices:\n if isinstance(iv.module, DagModule):\n iv.module.update_inner_modules()\n\n def get_vertex_by_name(self, name: str) -> Union[InnerVertex, InputVertex]:\n result = [vertex for vertex in self.vertices if vertex.name == name]\n if len(result) == 1:\n return result[0]\n elif len(result) > 1:\n raise AssertionError(f'Multiple vertices found with name: {name} -> {result}')\n else:\n return\n\n def get_forward_scaffold(self):\n # a mapping between vertices index and its predecessors indices\n forward_scaffold = {}\n for k, vertex in enumerate(self.vertices):\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n predecessors_indices = [\n self.vertices.index(pd) for pd in predecessors\n ]\n forward_scaffold[k] = predecessors_indices\n\n output_index = self.vertices.index(self.output_vertex)\n\n return forward_scaffold, output_index\n\n def compiled_forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n\n assert self.compiled\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_list = [None for _ in range(len(self.vertices))]\n\n for k, input in enumerate(inputs):\n forward_list[k] = input\n\n num_inputs = len(inputs)\n\n for k in range(len(self.vertices)):\n if k < num_inputs:\n pass\n else:\n\n pd_indices = self.forward_scaffold[k]\n module_inputs = [forward_list[pd_index] for pd_index in pd_indices]\n if 
len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n try:\n result = self.vertices[k].module(module_inputs)\n except (TypeError, AttributeError):\n result = self.vertices[k].module(*module_inputs)\n result = _postprocess_module_output(result)\n\n forward_list[k] = result\n\n return forward_list[self.output_index]\n\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n # this is for `torch.compile` usage\n if self.compiled:\n return self.compiled_forward(inputs)\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_dict = {}\n for k, v in enumerate(self.input_vertices):\n forward_dict[v] = inputs[k]\n\n # forward_dict = {vertex: tensor for vertex, tensor in zip(self.input_vertices, inputs)}\n inputs_dict = {}\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n module_inputs = [forward_dict[pd] for pd in predecessors]\n inputs_dict[vertex] = module_inputs\n\n if len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n\n try:\n result = vertex.module(module_inputs)\n except (TypeError, AttributeError):\n result = vertex.module(*module_inputs)\n # if isinstance(result, Tuple):\n result = _postprocess_module_output(result)\n\n forward_dict[vertex] = result\n if self.cache_forward_dict:\n self.forward_dict = forward_dict\n self.inputs_dict = inputs_dict\n return forward_dict[self.output_vertex]\n\n def traverse(\n self,\n processor: VertexProcessor = None,\n ):\n if processor is None:\n inner_vertices = []\n for inner_vertex in self.inner_vertices:\n if isinstance(inner_vertex.module, DagModule):\n inner_vertices.extend(inner_vertex.module.traverse())\n inner_vertices.append(inner_vertex)\n return inner_vertices\n else:\n for inner_vertex in self.traverse():\n processor(inner_vertex)\n # TODO: Remove after validation\n # self._update_inner_modules()\n\n def _check_if_name_unique(self, name: str):\n if name in [v.name for v in self.vertices]:\n raise ValueError(\n f'{self.name} already has an Vertex with name {name}. 
Please use different name.'\n )\n\n def add_input_vertex(self, name: str) -> InputVertex:\n self._check_if_name_unique(name)\n input_vertex = InputVertex(name)\n self.vertices.append(input_vertex)\n return input_vertex\n\n def add_vertex(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ) -> InnerVertex:\n self._check_if_name_unique(name)\n assert isinstance(module, torch.nn.Module)\n\n inner_vertex = InnerVertex(\n name=name,\n module=module,\n predecessors=predecessors,\n )\n for predecessor in predecessors:\n if predecessor not in self.vertices:\n raise ValueError(f'The predecessor: {predecessor} of InnerVertex: {InnerVertex} is not in '\n f'the DagModule: {self.name}')\n self.vertices.append(inner_vertex)\n self.inner_modules.append(module)\n inner_vertex.dag_module = self\n return inner_vertex\n\n def __repr__(self):\n representation = f'{self.__class__.__name__}[{self.name}]'\n if len(self.vertices) == 0:\n return representation\n for inner_vertex in self.inner_vertices:\n inner_vertex.MAX_LEN_REPR = self.MAX_LEN_REPR\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n representation += f'\\n << {vertex.name} '\n else:\n index = self.inner_vertices.index(vertex)\n prefix = '>>' if vertex == self.output_vertex else '*'\n if isinstance(vertex.module, DagModule):\n prefix += '#'\n representation += f\"\\n{prefix} {index}: {vertex} \" \\\n f\"--> predecessors: {vertex.predecessors}, \" \\\n f\"successors: {vertex.successors}\"\n representation += f' {self.add_custom_module_info(vertex)}'\n for vertex in self.inner_vertices:\n vertex.MAX_LEN_REPR = None\n return representation\n\n def add_custom_module_info(self, vertex: InnerVertex):\n m = vertex.module\n if isinstance(m, torch.nn.Conv2d):\n return f'Conv2d(in={m.in_channels}, out={m.out_channels}, ks={m.kernel_size}, padding={m.padding})'\n if isinstance(m, torch.nn.Linear):\n return f'Linear(in={m.in_features}, out={m.out_features})'\n return ''\n\n def mark_current_top_vertex_as_output(self):\n if not self.inner_vertices:\n raise ValueError(f'One cannot mark top node in an empty {self}')\n if self.output_vertex is not None:\n logger.warning(f'{self} already has an output vertex. 
Replacing...')\n self.output_vertex = self.inner_vertices[-1]\n\n @property\n def module_classes(self) -> Set:\n return set([m.__class__ for m in self.inner_modules])\n\n def unroll_inner_modules(self) -> List[torch.nn.Module]:\n result = []\n for m in self.inner_modules:\n if not isinstance(m, DagModule):\n result.append(m)\n else:\n result.extend(m.unroll_inner_modules())\n return result\n\n def save(self, path: str):\n # TODO: Remove after validation\n # self._update_inner_modules()\n self.enforce_names_uniqueness()\n os.makedirs(path, exist_ok=True)\n atomic_modules = self.unroll_inner_modules()\n self.clear_custom_buffers()\n torch.save(torch.nn.ModuleList(atomic_modules), os.path.join(path, 'modules.pt'))\n with open(os.path.join(path, 'config.dict.json'), 'w') as f:\n json.dump(self.config_dict(), f)\n\n def clear_custom_buffers(self):\n for module in self.unroll_inner_modules():\n if hasattr(module, 'clear_custom_buffers'):\n module._buffers.clear()\n\n @classmethod\n def load(\n cls,\n path: str,\n map_location='cpu',\n custom_module_classes: Tuple[Type[torch.nn.Module]] = (),\n ) -> \"DagModule\":\n \"\"\"\n\n :param path: directory from which model should be loaded\n :param map_location: defaults to `cpu`\n :param custom_module_classes: custom torch module classes needed for loading a `DagModule` that was built\n using these modules\n \"\"\"\n with open(os.path.join(path, 'config.dict.json'), 'r') as f:\n config_dict = json.load(f)\n m = torch.load(os.path.join(path, 'modules.pt'), map_location=map_location)\n return cls.load_from_config_dict_and_atomic_modules(\n config_dict=config_dict,\n atomic_modules=m\n )\n\n @classmethod\n def load_from_config_dict_and_atomic_modules(\n cls,\n config_dict: Dict,\n atomic_modules: List[torch.nn.Module]\n ) -> \"DagModule\":\n output_index = config_dict.pop('output_index')\n name = config_dict.pop('name')\n if 'class' in config_dict:\n class_name = config_dict.pop('class')\n else:\n class_name = cls.__name__\n dag = None\n if class_name == cls.__name__:\n dag = cls(name)\n for subclass in cls.__subclasses__():\n if subclass.__name__ == class_name:\n dag = subclass(name)\n\n if dag is None:\n raise NotImplementedError(f'There is no subclass with name: {class_name}.')\n\n for k, (key, config) in enumerate(config_dict.items()):\n if config['type'] == 'input':\n dag.add_input_vertex(name=config['name'])\n else:\n predecessors = [dag.vertices[index] for index in config['predecessor_indices']]\n if config['is_atomic']:\n module = atomic_modules[config['module_index']]\n else:\n module = cls.load_from_config_dict_and_atomic_modules(\n config_dict=config['module_dict'],\n atomic_modules=atomic_modules,\n )\n vertex = dag.add_vertex(\n name=config['name'],\n module=module,\n predecessors=predecessors,\n )\n orbit = config.get('orbit')\n if orbit:\n vertex.orbit = orbit\n if k == output_index:\n dag.output_vertex = vertex\n\n return dag\n\n def config_dict(self, atomic_modules: List[torch.nn.Module] = None) -> Dict:\n if atomic_modules is None:\n atomic_modules = self.unroll_inner_modules()\n config_dict = {}\n for k, vertex in enumerate(self.vertices):\n _config = vertex.config_dict(atomic_modules)\n config_dict[k] = _config\n\n config_dict['name'] = self.name\n config_dict['class'] = self.__class__.__name__\n config_dict['output_index'] = self.vertices.index(self.output_vertex)\n return config_dict\n\n def _get_inner_vertex_predecessor_indices(self, inner_vertex: InnerVertex) -> List[int]:\n result = [\n self.vertices.index(predecessor)\n for 
predecessor in inner_vertex.predecessors\n ]\n return result\n\n @property\n def flat(self) -> bool:\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n return False\n return True\n\n def flatten(self, input_shape_for_verification: Optional[Tuple[int, ...]] = None) -> \"DagModule\":\n \"\"\"\n This method will switch the `dag` to `eval` mode if `input_shape_for_verification` is provided.\n :param input_shape_for_verification:\n :return:\n \"\"\"\n dag_copy = deepcopy(self)\n if self.flat:\n return dag_copy\n\n if input_shape_for_verification:\n dag_copy.eval()\n x = torch.normal(mean=torch.zeros(size=input_shape_for_verification))\n reference_output = dag_copy(x)\n\n # builds a new cell (not in place flatten)\n dag = self.__class__(name=dag_copy.name, vertices=dag_copy.input_vertices)\n for v in dag_copy.inner_vertices:\n if not isinstance(v.module, DagModule):\n dag.vertices.append(v)\n v.dag_module = dag\n if v == dag_copy.output_vertex:\n dag.output_vertex = v\n else:\n inner_dag_predecessors = v.predecessors\n inner_dag_successors = v.successors\n inner_dag = v.module.flatten()\n for iv in inner_dag.inner_vertices:\n for pd in iv.predecessors: # remap predecessors where needed\n if isinstance(pd, InputVertex):\n pd_index_in_inner_dag = inner_dag.input_vertices.index(pd)\n index = iv.predecessors.index(pd)\n iv.predecessors[index] = inner_dag_predecessors[pd_index_in_inner_dag]\n if inner_dag.output_vertex == iv: # remap output of inner dag\n for suc in inner_dag_successors:\n index = suc.predecessors.index(v)\n suc.predecessors[index] = iv\n iv.dag_module = dag\n dag.vertices.append(iv)\n if v == dag_copy.output_vertex:\n dag.output_vertex = iv\n assert all([e in dag.vertices for e in iv.predecessors])\n\n if input_shape_for_verification:\n dag.eval()\n new_output = dag(x)\n assert torch.abs(reference_output - new_output).sum() == 0.0\n\n # TODO: Remove after validation\n # self._update_inner_modules()\n dag.enforce_names_uniqueness()\n\n return dag\n\n def enforce_names_uniqueness(self):\n names = [v.name for v in self.vertices]\n while len(names) != len(set(names)):\n names_counter = Counter()\n for v in self.vertices:\n name = v.name\n names_counter[name] += 1\n if names_counter[name] > 1:\n new_name = f'{name}_{names_counter[name] - 1}'\n logger.debug(f'Renaming: {name} -> {new_name}')\n v.name = new_name\n names = [v.name for v in self.vertices]\n\n def clear_tensor_dicts(self):\n self.forward_dict = None\n self.inputs_dict = None\n\n @property\n def device(self):\n # https://discuss.pytorch.org/t/how-to-check-if-model-is-on-cuda/180/10\n # useful, but may be dangerous\n self.update_inner_modules()\n device_ = next(iter(self.parameters())).device\n if not all([p.device == device_ for p in self.parameters()]):\n raise AssertionError(f'Not all parameters of {self.name} are on the same device')\n return device_"
},
{
"identifier": "log_dag_characteristics",
"path": "torch_dag/commons/flops_computation.py",
"snippet": "def log_dag_characteristics(\n dag: dag_module.DagModule,\n input_shape_without_batch: Tuple[int, ...],\n):\n dag.clear_custom_buffers()\n if len(input_shape_without_batch) != 3:\n logger.warning(f'One cannot compute `kmapp` for cell: {dag.name}, since the input_shape_without_batch '\n f'has length less than 2.')\n return\n dag.eval()\n x = torch.ones(size=(1,) + input_shape_without_batch, device=dag.device)\n y = dag(x)\n if isinstance(y, torch.Tensor):\n y = [y]\n static_kmapp = compute_static_kmapp(dag, input_shape_without_batch)\n logger.info(f'static_kmapp: {static_kmapp}')\n num_params = get_num_params(dag) / 1e6\n logger.info(f'number params (M): {num_params}')\n logger.info(f'number of output tensors: {len(y)}')\n for k, tensor in enumerate(y):\n logger.info(f'output shape of output tensor {k}: {tensor.shape}')\n dag.clear_custom_buffers()"
},
{
"identifier": "dag_orbitalizer",
"path": "torch_dag_algorithms/pruning/dag_orbitalizer.py",
"snippet": "def log_kmapps_stats(\n dag: DagModule,\n input_shape_without_batch: Tuple[int, ...],\n orbits: List[Orbit],\n) -> Union[Tuple[float, float], None]:\n def orbitalize(self, dag: DagModule, *args, **kwargs) -> Tuple[DagModule, Optional[List[Orbit]]]:\n def __init__(\n self,\n pruning_mode: str = PRUNING_DEFAULT_MODE_NAME,\n truncate_on: nn.Module = TRUNCATE_ON,\n block_size: Optional[int] = None,\n custom_unprunable_module_classes: Tuple[Type[torch.nn.Module]] = (),\n ):\n def check_and_handle_known_custom_patterns(self, extended_orbit: Orbit) -> Union[Orbit, List[Orbit]]:\n def orbitalize(\n self,\n dag: DagModule,\n prune_stem: bool = False,\n vis_final_orbits: bool = True,\n input_shape: Tuple[int, ...] = [(1, 3, 256, 256)],\n vis_saving_dir: str = None,\n skip_orbits_with_channels_less_than_block_size: bool = False,\n remove_tensor_mergers_and_extractors: bool = True,\n return_stats: bool = False,\n force_log_stats: bool = True,\n ) -> Union[Tuple[DagModule, List[Orbit]], Tuple[DagModule, List[Orbit], float, float]]:\nclass Orbitalizer(ABC):\nclass GeneralOrbitalizer(Orbitalizer):"
},
{
"identifier": "constants",
"path": "torch_dag_algorithms/pruning/constants.py",
"snippet": "PRUNING_DEFAULT_MODE_NAME = 'default'\nPRUNING_BLOCK_SNPE_MODE_NAME = 'block_snpe'\nPRUNING_WHOLE_BLOCK_MODE_NAME = 'whole_block'\nINITIAL_LOGITS_VALUE_FOR_PRUNING = 3.\nDEFAULT_TEMPERATURE = 0.5\nDEFAULT_TEMPERATURE_DECAY_RATE = 1.0\nDEFAULT_NUM_TRAINING_STEPS = 100000\nTEMPERATURE_LOWER_BOUND = 0.5\nMULTIADDS_BASIS = 1000.\nMAX_LOGITS_ABS_VALUE = 6.\nBLOCK_SIZE_FOR_CHANNELS = 32\nSIMPLE_ORBIT_LOGITS_MULTIPLIER = 0.01"
},
{
"identifier": "DagVisualizer",
"path": "torch_dag/visualization/visualize_dag.py",
"snippet": "class DagVisualizer:\n\n def __init__(self, dag: DagModule):\n self.dag = dag\n self.dag.cache_forward_dict = True\n self.flops_list = None\n\n @staticmethod\n def get_name(namescope_index: str, index: str):\n if namescope_index is None:\n return f\"{index}\"\n else:\n return f\"{namescope_index}_{index}\"\n\n def visualize(\n self,\n max_depth: int = 0,\n input_shape: Tuple[int, ...] = None,\n saving_path: Optional[str] = None,\n saving_format: str = \"pdf\",\n ):\n if input_shape is not None:\n self.dag.eval()\n _ = self.dag(torch.ones(size=input_shape))\n if max_depth == 0:\n self.flops_list = build_full_flops_list(\n dag=self.dag, input_shape_without_batch=input_shape[1:], normalize=True)\n\n graph, input_node_names, output_node_names = self._visualize(\n dag=self.dag,\n max_depth=max_depth,\n )\n if saving_path is not None:\n graph.render(saving_path, format=saving_format)\n\n return graph\n\n # def get_weights_stats(self, node: nd.nodes.Node):\n # if isinstance(node, (nd.ops.Conv2D, nd.ops.DepthwiseConv)):\n # return self.compute_tensor_stats(node.filters)\n # elif isinstance(node, nd.ops.Dense):\n # return self.compute_tensor_stats(node.kernel)\n # else:\n # return None\n\n def get_module_meta(self, module: nn.Module) -> Dict:\n meta = {}\n if isinstance(module, nn.Conv2d):\n meta['kernel_size'] = module.kernel_size\n meta['in_channels'] = module.in_channels\n meta['out_channels'] = module.out_channels\n meta['groups'] = module.groups\n elif isinstance(module, smodules.ACTIVATION_MODULES):\n meta['activation_function'] = module.__class__.__name__\n # weights_stats = self.get_weights_stats(node)\n # if weights_stats is not None:\n # mean, std, maximum, minimum = weights_stats\n # meta['weights_mean_std_max_min'] = f'{mean:.3f}, {std:.3f}, {maximum:.3f}, {minimum:.3f}'\n\n return meta\n\n def add_nested_dag_as_subgraph(\n self,\n g: graphviz.Digraph,\n name: str,\n dag: DagModule,\n depth: int,\n max_depth: int,\n ) -> Tuple[graphviz.Digraph, List[str], List[str]]:\n with g.subgraph(name=f'cluster_{dag.name}') as s:\n fillcolor = self.get_depth_fill_color(depth)\n s.attr(\n label=f'{dag.name}',\n style='filled',\n fillcolor=fillcolor,\n )\n return self._visualize(\n dag=dag,\n graph=s,\n namescope_index=name,\n max_depth=max_depth,\n depth=depth,\n )\n\n def get_depth_fill_color(self, depth: int):\n if depth == 1:\n return DEPTH_1_FILL_COLOR\n elif depth == 2:\n return DEPTH_2_FILL_COLOR\n\n def compute_tensor_stats(self, tensor: torch.Tensor):\n mean = tensor.mean()\n std = torch.std(tensor)\n maximum = tensor.max()\n minimum = tensor.min()\n return mean, std, maximum, minimum\n\n def _visualize(\n self,\n dag: DagModule,\n graph: graphviz.Digraph = None,\n namescope_index: str = None,\n max_depth: int = 0,\n depth: int = 0,\n ) -> Tuple[graphviz.Digraph, List[str], List[str]]:\n if graph is None:\n g = graphviz.Digraph('model')\n else:\n g = graph\n g.node_attr.update(style='filled', shape='box')\n go_deeper = True if max_depth > 0 else False\n names = {}\n input_vertex_names = []\n output_vertex_names = []\n for k, vertex in enumerate(dag.vertices):\n name = self.get_name(namescope_index, str(k))\n names[k] = name\n if isinstance(vertex, InputVertex):\n label = f'input_{k}'\n g.node(\n name,\n label=label,\n color=NODE_SHAPE_COLOR,\n fillcolor=INPUT_COLOR,\n shape=INPUT_SHAPE,\n )\n input_vertex_names.append(name)\n\n else:\n predecessors_indices = [dag.vertices.index(pd) for pd in vertex.predecessors]\n if isinstance(vertex.module, DagModule) and go_deeper:\n sgraph, 
inputs, outputs = self.add_nested_dag_as_subgraph(\n g=g,\n name=name,\n dag=vertex.module,\n depth=depth + 1,\n max_depth=max_depth - 1\n )\n\n for l, pd in enumerate(predecessors_indices):\n edge = names[pd], inputs[l]\n g.edge(edge[0], edge[1])\n names[k] = outputs[0]\n if vertex == dag.output_vertex:\n output_vertex_names = [name]\n\n else:\n module = vertex.module\n fillcolor = get_vertex_color(module)\n color = get_vertex_color(module)\n\n label = f'{vertex.name}'\n label += f'_idx_{dag.vertices.index(vertex)}'\n if max_depth == 0 and depth == 0 and self.flops_list is not None:\n flops = self.flops_list[self.dag.inner_vertices.index(vertex)]\n label += f' \\n kmapp: {flops}'\n if vertex.orbit is not None:\n label += f' \\n orbit: {vertex.orbit}'\n\n if len(self.get_module_meta(module).keys()) > 0:\n label += f' \\n ----------'\n\n # add meta node info\n for key, value in self.get_module_meta(module).items():\n label += f' \\n {key}: {value}'\n\n # add output shape visualization\n if dag.forward_dict is not None:\n if isinstance(vertex.module, smodules.ArgModule):\n pass\n else:\n label += f' \\n ----------'\n tensors = dag.forward_dict[vertex]\n if not isinstance(dag.forward_dict[vertex], List):\n tensors = [tensors]\n shapes = []\n for el in tensors:\n if isinstance(el, torch.Tensor):\n shapes.append(tuple([int(e) for e in el.shape]))\n else:\n shapes.append(tuple())\n\n for tensor, shape in zip(tensors, shapes):\n label += f' \\n {shape}'\n if isinstance(tensor, torch.Tensor) and tensor.dtype == torch.float:\n mean, std, maximum, minimum = self.compute_tensor_stats(tensor)\n label += f' \\n mean: {mean:.3f}, std: {std:.3f}, max: {maximum:.3f}, min: {minimum:.3f}'\n\n if vertex == dag.output_vertex:\n shape = OUTPUT_SHAPE\n fillcolor = f'{OUTPUT_COLOR}:{fillcolor}'\n output_vertex_names = [name]\n else:\n shape = NODE_SHAPE\n\n g.node(\n name,\n label=label,\n shape=shape,\n color=NODE_SHAPE_COLOR,\n fillcolor=fillcolor,\n )\n for pd in predecessors_indices:\n edge = names[pd], names[k]\n g.edge(edge[0], edge[1])\n\n return g, input_vertex_names, output_vertex_names"
}
] | import argparse
import logging
import os
from torch_dag.core.dag_module import DagModule
from torch_dag.commons.flops_computation import log_dag_characteristics
from torch_dag_algorithms.pruning import dag_orbitalizer
from torch_dag_algorithms.pruning import constants
from torch_dag.visualization.visualize_dag import DagVisualizer | 7,264 | #
# Copyright © TCL Research Europe. All rights reserved.
#
logger = logging.getLogger(__name__)
PRUNING_MODES = [
constants.PRUNING_DEFAULT_MODE_NAME,
constants.PRUNING_BLOCK_SNPE_MODE_NAME,
constants.PRUNING_WHOLE_BLOCK_MODE_NAME,
]
def parse_args():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"--model_path",
type=str,
)
arg_parser.add_argument(
"--saving_path",
type=str,
)
arg_parser.add_argument(
"--block_size",
default=8,
type=int,
)
arg_parser.add_argument(
"--pruning_mode",
type=str,
default=constants.PRUNING_BLOCK_SNPE_MODE_NAME,
choices=PRUNING_MODES,
)
arg_parser.add_argument(
"--input_shape",
type=int,
nargs="+",
help="Input shape to the orbitalized model (including batch dimension).",
)
args = arg_parser.parse_args()
return args
def orbitalize_model(
model_path,
input_shape,
pruning_mode,
block_size,
saving_path,
):
path = model_path
dag = DagModule.load(path)
dag.eval()
input_shape = tuple(input_shape)
log_dag_characteristics(dag, input_shape_without_batch=input_shape[1:])
orbitalizer = dag_orbitalizer.GeneralOrbitalizer(
pruning_mode=pruning_mode,
block_size=block_size,
)
dag, found_final_orbits = orbitalizer.orbitalize(
dag=dag,
prune_stem=True,
input_shape=input_shape,
skip_orbits_with_channels_less_than_block_size=True,
)
if not saving_path:
saving_path = os.path.join(path, "orbitalized")
else:
saving_path = saving_path
dag.save(os.path.join(saving_path))
| #
# Copyright © TCL Research Europe. All rights reserved.
#
logger = logging.getLogger(__name__)
PRUNING_MODES = [
constants.PRUNING_DEFAULT_MODE_NAME,
constants.PRUNING_BLOCK_SNPE_MODE_NAME,
constants.PRUNING_WHOLE_BLOCK_MODE_NAME,
]
def parse_args():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"--model_path",
type=str,
)
arg_parser.add_argument(
"--saving_path",
type=str,
)
arg_parser.add_argument(
"--block_size",
default=8,
type=int,
)
arg_parser.add_argument(
"--pruning_mode",
type=str,
default=constants.PRUNING_BLOCK_SNPE_MODE_NAME,
choices=PRUNING_MODES,
)
arg_parser.add_argument(
"--input_shape",
type=int,
nargs="+",
help="Input shape to the orbitalized model (including batch dimension).",
)
args = arg_parser.parse_args()
return args
def orbitalize_model(
model_path,
input_shape,
pruning_mode,
block_size,
saving_path,
):
path = model_path
dag = DagModule.load(path)
dag.eval()
input_shape = tuple(input_shape)
log_dag_characteristics(dag, input_shape_without_batch=input_shape[1:])
orbitalizer = dag_orbitalizer.GeneralOrbitalizer(
pruning_mode=pruning_mode,
block_size=block_size,
)
dag, found_final_orbits = orbitalizer.orbitalize(
dag=dag,
prune_stem=True,
input_shape=input_shape,
skip_orbits_with_channels_less_than_block_size=True,
)
if not saving_path:
saving_path = os.path.join(path, "orbitalized")
else:
saving_path = saving_path
dag.save(os.path.join(saving_path)) | visualizer = DagVisualizer(dag=dag) | 4 | 2023-11-17 15:36:44+00:00 | 12k |
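A minimal sketch of how the two functions in the snippet above could be wired into a CLI entry point; the `main` wrapper and the `__main__` guard are assumptions for illustration and are not part of the cropped file:

def main():
    args = parse_args()
    orbitalize_model(
        model_path=args.model_path,
        input_shape=args.input_shape,
        pruning_mode=args.pruning_mode,
        block_size=args.block_size,
        saving_path=args.saving_path,
    )


if __name__ == "__main__":
    main()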
joyn-gg/discord.http | discord_http/backend.py | [
{
"identifier": "Command",
"path": "discord_http/commands.py",
"snippet": "class Command:\n def __init__(\n self,\n command: Callable,\n name: str,\n description: Optional[str] = None,\n guild_ids: Optional[list[Union[utils.Snowflake, int]]] = None,\n type: ApplicationCommandType = ApplicationCommandType.chat_input,\n ):\n self.id: Optional[int] = None\n self.command = command\n self.cog: Optional[\"Cog\"] = None\n self.type: int = int(type)\n self.name = name\n self.description = description\n self.options = []\n self.default_member_permissions = None\n\n self.name_localizations: Dict[LocaleTypes, str] = {}\n self.description_localizations: Dict[LocaleTypes, str] = {}\n\n self.list_autocompletes: Dict[str, Callable] = {}\n self.guild_ids: list[Union[utils.Snowflake, int]] = guild_ids or []\n self.__list_choices: list[str] = []\n\n if self.type == ApplicationCommandType.chat_input:\n if self.description is None:\n self.description = command.__doc__ or \"No description provided.\"\n if self.name != self.name.lower():\n raise ValueError(\"Command names must be lowercase.\")\n if not 1 <= len(self.description) <= 100:\n raise ValueError(\"Command descriptions must be between 1 and 100 characters.\")\n else:\n self.description = None\n\n if self.type is ApplicationCommandType.chat_input.value and not self.options:\n sig = inspect.signature(self.command)\n self.options = []\n\n slicer = 1\n if sig.parameters.get(\"self\"):\n slicer = 2\n\n for parameter in itertools.islice(sig.parameters.values(), slicer, None):\n origin = getattr(\n parameter.annotation, \"__origin__\",\n parameter.annotation\n )\n\n option = {}\n\n if (\n origin in [Union] and\n len(parameter.annotation.__args__) == 2\n ):\n # Parsing Optional/Union types\n origin = parameter.annotation.__args__[0]\n\n if origin in [Member, User]:\n ptype = CommandOptionType.user\n elif origin in channel_types:\n ptype = CommandOptionType.channel\n option.update({\n \"channel_types\": [\n int(i) for i in channel_types[origin]\n ]\n })\n elif origin in [Attachment]:\n ptype = CommandOptionType.attachment\n elif origin in [Role]:\n ptype = CommandOptionType.role\n elif origin in [Choice]:\n # Temporarily set to string, will be changed later\n self.__list_choices.append(parameter.name)\n ptype = CommandOptionType.string\n elif isinstance(origin, Range):\n ptype = origin.type\n if origin.type == CommandOptionType.string:\n option.update({\n \"min_length\": origin.min,\n \"max_length\": origin.max\n })\n else:\n option.update({\n \"min_value\": origin.min,\n \"max_value\": origin.max\n })\n elif origin == int:\n ptype = CommandOptionType.integer\n elif origin == bool:\n ptype = CommandOptionType.boolean\n elif origin == float:\n ptype = CommandOptionType.number\n elif origin == str:\n ptype = CommandOptionType.string\n else:\n ptype = CommandOptionType.string\n\n option.update({\n \"name\": parameter.name,\n \"description\": \"…\",\n \"type\": ptype.value,\n \"required\": (parameter.default == parameter.empty),\n \"autocomplete\": False,\n \"name_localizations\": {},\n \"description_localizations\": {},\n })\n\n self.options.append(option)\n\n def __repr__(self) -> str:\n return f\"<Command name='{self.name}'>\"\n\n @property\n def mention(self) -> str:\n \"\"\" `str`: Returns a mentionable string for the command \"\"\"\n if self.id:\n return f\"</{self.name}:{self.id}>\"\n return f\"`/{self.name}`\"\n\n def mention_sub(self, suffix: str) -> str:\n \"\"\"\n Returns a mentionable string for a subcommand.\n\n Parameters\n ----------\n suffix: `str`\n The subcommand name.\n\n Returns\n -------\n `str`\n The 
mentionable string.\n \"\"\"\n if self.id:\n return f\"</{self.name} {suffix}:{self.id}>\"\n return f\"`/{self.name} {suffix}`\"\n\n async def _make_context_and_run(\n self,\n context: \"Context\"\n ) -> BaseResponse:\n args, kwargs = context._create_args()\n\n for name, values in getattr(self.command, \"__choices_params__\", {}).items():\n if name not in kwargs:\n continue\n if name not in self.__list_choices:\n continue\n kwargs[name] = Choice(\n kwargs[name], values[kwargs[name]]\n )\n\n result = await self.run(context, *args, **kwargs)\n\n if not isinstance(result, BaseResponse):\n raise TypeError(\n f\"Command {self.name} must return a \"\n f\"Response object, not {type(result)}.\"\n )\n\n return result\n\n def _has_permissions(self, ctx: \"Context\") -> Permissions:\n _perms: Optional[Permissions] = getattr(\n self.command, \"__has_permissions__\", None\n )\n\n if _perms is None:\n return Permissions(0)\n\n if (\n isinstance(ctx.user, Member) and\n Permissions.administrator in ctx.user.resolved_permissions\n ):\n return Permissions(0)\n\n missing = Permissions(sum([\n flag.value for flag in _perms\n if flag not in ctx.app_permissions\n ]))\n\n return missing\n\n def _bot_has_permissions(self, ctx: \"Context\") -> Permissions:\n _perms: Optional[Permissions] = getattr(\n self.command, \"__bot_has_permissions__\", None\n )\n\n if _perms is None:\n return Permissions(0)\n if Permissions.administrator in ctx.app_permissions:\n return Permissions(0)\n\n missing = Permissions(sum([\n flag.value for flag in _perms\n if flag not in ctx.app_permissions\n ]))\n\n return missing\n\n async def _command_checks(self, ctx: \"Context\") -> bool:\n _checks: list[Callable] = getattr(\n self.command, \"__checks__\", []\n )\n\n for g in _checks:\n if inspect.iscoroutinefunction(g):\n result = await g(ctx)\n else:\n result = g(ctx)\n\n if result is not True:\n raise CheckFailed(f\"Check {g.__name__} failed.\")\n\n return True\n\n async def run(self, context: \"Context\", *args, **kwargs) -> BaseResponse:\n \"\"\"\n Runs the command.\n\n Parameters\n ----------\n context: `Context`\n The context of the command.\n\n Returns\n -------\n `BaseResponse`\n The return type of the command, used by backend.py (Quart)\n\n Raises\n ------\n `UserMissingPermissions`\n User that ran the command is missing permissions.\n `BotMissingPermissions`\n Bot is missing permissions.\n \"\"\"\n # Check user permissions\n perms_user = self._has_permissions(context)\n if perms_user != Permissions(0):\n raise UserMissingPermissions(perms_user)\n\n # Check bot permissions\n perms_bot = self._bot_has_permissions(context)\n if perms_bot != Permissions(0):\n raise BotMissingPermissions(perms_bot)\n\n # Check custom checks\n await self._command_checks(context)\n\n if self.cog is not None:\n return await self.command(self.cog, context, *args, **kwargs)\n else:\n return await self.command(context, *args, **kwargs)\n\n async def run_autocomplete(\n self,\n context: \"Context\",\n name: str,\n current: str\n ) -> dict:\n \"\"\"\n Runs the autocomplete\n\n Parameters\n ----------\n context: `Context`\n Context object for the command\n name: `str`\n Name of the option\n current: `str`\n Current value of the option\n\n Returns\n -------\n `dict`\n The return type of the command, used by backend.py (Quart)\n\n Raises\n ------\n `TypeError`\n Autocomplete must return an AutocompleteResponse object\n \"\"\"\n if self.cog is not None:\n result = await self.list_autocompletes[name](self.cog, context, current)\n else:\n result = await 
self.list_autocompletes[name](context, current)\n\n if isinstance(result, AutocompleteResponse):\n return result.to_dict()\n raise TypeError(\"Autocomplete must return an AutocompleteResponse object.\")\n\n def _find_option(self, name: str) -> Optional[dict]:\n return next((g for g in self.options if g[\"name\"] == name), None)\n\n def to_dict(self) -> dict:\n \"\"\"\n Converts the command to a dict.\n\n Returns\n -------\n `dict`\n The dict of the command.\n \"\"\"\n _extra_locale = getattr(self.command, \"__locales__\", {})\n _extra_params = getattr(self.command, \"__describe_params__\", {})\n _extra_choices = getattr(self.command, \"__choices_params__\", {})\n _default_permissions = getattr(self.command, \"__default_permissions__\", None)\n\n # Types\n _extra_locale: dict[LocaleTypes, list[LocaleContainer]]\n\n data = {\n \"type\": self.type,\n \"name\": self.name,\n \"description\": self.description,\n \"options\": self.options,\n \"default_permission\": True,\n \"dm_permission\": getattr(self.command, \"__dm_permission__\", True),\n \"nsfw\": getattr(self.command, \"__nsfw__\", False),\n \"name_localizations\": {},\n \"description_localizations\": {},\n }\n\n for key, value in _extra_locale.items():\n for loc in value:\n if loc.key == \"_\":\n data[\"name_localizations\"][key] = loc.name\n data[\"description_localizations\"][key] = loc.description\n continue\n\n opt = self._find_option(loc.key)\n if not opt:\n _log.warn(\n f\"{self.name} -> {loc.key}: \"\n \"Option not found in command, skipping...\"\n )\n continue\n\n opt[\"name_localizations\"][key] = loc.name\n opt[\"description_localizations\"][key] = loc.description\n\n if _default_permissions:\n data[\"default_member_permissions\"] = _default_permissions\n\n for key, value in _extra_params.items():\n opt = self._find_option(key)\n if not opt:\n continue\n\n opt[\"description\"] = value\n\n for key, value in _extra_choices.items():\n opt = self._find_option(key)\n if not opt:\n continue\n\n opt[\"choices\"] = [\n {\"name\": v, \"value\": k}\n for k, v in value.items()\n ]\n\n return data\n\n def autocomplete(self, name: str):\n \"\"\"\n Decorator to set an option as an autocomplete.\n\n The function must at the end, return a `Response.send_autocomplete()` object.\n\n Example usage\n\n .. code-block:: python\n\n @commands.command()\n async def ping(ctx, options: str):\n await ctx.send(f\"You chose {options}\")\n\n @ping.autocomplete(\"options\")\n async def search_autocomplete(ctx, current: str):\n return ctx.response.send_autocomplete({\n \"key\": \"Value shown to user\",\n \"feeling_lucky_tm\": \"I'm feeling lucky!\"\n })\n\n Parameters\n ----------\n name: `str`\n Name of the option to set as an autocomplete.\n \"\"\"\n def wrapper(func):\n find_option = next((\n option for option in self.options\n if option[\"name\"] == name\n ), None)\n\n if not find_option:\n raise ValueError(f\"Option {name} in command {self.name} not found.\")\n find_option[\"autocomplete\"] = True\n self.list_autocompletes[name] = func\n return func\n\n return wrapper"
},
{
"identifier": "Interaction",
"path": "discord_http/commands.py",
"snippet": "class Interaction:\n def __init__(\n self,\n func: Callable,\n custom_id: str,\n *,\n regex: bool = False\n ):\n self.func = func\n self.custom_id = custom_id\n self.cog: Optional[\"Cog\"] = None\n self.is_regex: bool = regex\n\n def __repr__(self) -> str:\n return (\n f\"<Interaction custom_id='{self.custom_id}' \"\n f\"is_regex={self.is_regex}>\"\n )\n\n async def run(self, context: \"Context\") -> BaseResponse:\n \"\"\"\n Runs the interaction.\n\n Parameters\n ----------\n context: `Context`\n The context of the interaction.\n\n Returns\n -------\n `BaseResponse`\n The return type of the interaction, used by backend.py (Quart)\n\n Raises\n ------\n `TypeError`\n Interaction must be a Response object\n \"\"\"\n if self.cog is not None:\n result = await self.func(self.cog, context)\n else:\n result = await self.func(context)\n\n if not isinstance(result, BaseResponse):\n raise TypeError(\"Interaction must be a Response object\")\n\n return result"
},
{
"identifier": "Listener",
"path": "discord_http/commands.py",
"snippet": "class Listener:\n def __init__(self, name: str, coro: Callable):\n self.name = name\n self.coro = coro\n self.cog: Optional[\"Cog\"] = None\n\n def __repr__(self) -> str:\n return f\"<Listener name='{self.name}'>\"\n\n async def run(self, *args, **kwargs):\n \"\"\" Runs the listener \"\"\"\n if self.cog is not None:\n await self.coro(self.cog, *args, **kwargs)\n else:\n await self.coro(*args, **kwargs)"
},
{
"identifier": "SubGroup",
"path": "discord_http/commands.py",
"snippet": "class SubGroup(Command):\n def __init__(\n self,\n *,\n name: str,\n description: Optional[str] = None,\n guild_ids: Optional[list[Union[utils.Snowflake, int]]] = None\n ):\n self.name = name\n self.description = description or \"...\" # Only used to make Discord happy\n self.guild_ids: list[Union[utils.Snowflake, int]] = guild_ids or []\n self.type = int(ApplicationCommandType.chat_input)\n self.cog: Optional[\"Cog\"] = None\n self.subcommands: Dict[str, Union[SubCommand, SubGroup]] = {}\n\n def __repr__(self) -> str:\n _subs = [g for g in self.subcommands.values()]\n return f\"<SubGroup name='{self.name}', subcommands={_subs}>\"\n\n def command(\n self,\n name: Optional[str] = None,\n description: Optional[str] = None,\n guild_ids: Optional[list[Union[utils.Snowflake, int]]] = None,\n ):\n \"\"\"\n Decorator to add a subcommand to a subcommand group\n\n Parameters\n ----------\n name: `Optional[str]`\n Name of the command (defaults to the function name)\n description: `Optional[str]`\n Description of the command (defaults to the function docstring)\n guild_ids: `Optional[list[Union[utils.Snowflake, int]]]`\n List of guild IDs to register the command in\n \"\"\"\n def decorator(func):\n subcommand = SubCommand(\n func,\n name=name or func.__name__,\n description=description,\n guild_ids=guild_ids,\n )\n self.subcommands[subcommand.name] = subcommand\n return subcommand\n return decorator\n\n def group(self, name: Optional[str] = None):\n \"\"\"\n Decorator to add a subcommand group to a subcommand group\n\n Parameters\n ----------\n name: `Optional[str]`\n Name of the subcommand group (defaults to the function name)\n \"\"\"\n def decorator(func):\n subgroup = SubGroup(name=name or func.__name__)\n self.subcommands[subgroup.name] = subgroup\n return subgroup\n return decorator\n\n def add_group(self, name: str) -> \"SubGroup\":\n \"\"\"\n Adds a subcommand group to a subcommand group\n\n Parameters\n ----------\n name: `str`\n Name of the subcommand group\n\n Returns\n -------\n `SubGroup`\n The subcommand group\n \"\"\"\n subgroup = SubGroup(name=name)\n self.subcommands[subgroup.name] = subgroup\n return subgroup\n\n @property\n def options(self) -> list[dict]:\n \"\"\" `list[dict]`: Returns the options of the subcommand group \"\"\"\n options = []\n for cmd in self.subcommands.values():\n data = cmd.to_dict()\n if isinstance(cmd, SubGroup):\n data[\"type\"] = int(CommandOptionType.sub_command_group)\n else:\n data[\"type\"] = int(CommandOptionType.sub_command)\n options.append(data)\n return options"
},
{
"identifier": "InteractionType",
"path": "discord_http/enums.py",
"snippet": "class InteractionType(Enum):\n ping = 1\n application_command = 2\n message_component = 3\n application_command_autocomplete = 4\n modal_submit = 5"
},
{
"identifier": "CheckFailed",
"path": "discord_http/errors.py",
"snippet": "class CheckFailed(DiscordException):\n \"\"\" Raised whenever a check fails \"\"\"\n pass"
},
{
"identifier": "BaseResponse",
"path": "discord_http/response.py",
"snippet": "class BaseResponse:\n def __init__(self):\n pass\n\n @property\n def content_type(self) -> str:\n \"\"\" `str`: Returns the content type of the response \"\"\"\n multidata = MultipartData()\n return multidata.content_type\n\n def to_dict(self) -> dict:\n \"\"\" Default method to convert the response to a `dict` \"\"\"\n raise NotImplementedError\n\n def to_multipart(self) -> bytes:\n \"\"\" Default method to convert the response to a `bytes` \"\"\"\n raise NotImplementedError"
},
{
"identifier": "Ping",
"path": "discord_http/response.py",
"snippet": "class Ping:\n def __init__(\n self,\n *,\n state: \"DiscordAPI\",\n data: dict\n ):\n self._state = state\n self._raw_user = data[\"user\"]\n\n self.id: int = int(data[\"id\"])\n self.application_id: int = int(data[\"application_id\"])\n self.version: int = int(data[\"version\"])\n\n def __repr__(self) -> str:\n return f\"<Ping application_id={self.application_id} user='{self.user}'>\"\n\n def __int__(self) -> int:\n return self.id\n\n @property\n def user(self) -> \"User\":\n \"\"\" `User`: Returns the user object of the bot \"\"\"\n from .user import User\n return User(state=self._state, data=self._raw_user)"
},
{
"identifier": "MessageResponse",
"path": "discord_http/response.py",
"snippet": "class MessageResponse(BaseResponse):\n def __init__(\n self,\n content: Optional[str] = MISSING,\n *,\n file: Optional[File] = MISSING,\n files: Optional[list[File]] = MISSING,\n embed: Optional[Embed] = MISSING,\n embeds: Optional[list[Embed]] = MISSING,\n attachment: Optional[File] = MISSING,\n attachments: Optional[list[File]] = MISSING,\n view: Optional[View] = MISSING,\n tts: Optional[bool] = False,\n allowed_mentions: Optional[AllowedMentions] = MISSING,\n message_reference: Optional[\"MessageReference\"] = MISSING,\n type: Union[ResponseType, int] = 4,\n ephemeral: Optional[bool] = False,\n ):\n self.content = content\n self.files = files\n self.embeds = embeds\n self.attachments = attachments\n self.ephemeral = ephemeral\n self.view = view\n self.tts = tts\n self.type = type\n self.allowed_mentions = allowed_mentions\n self.message_reference = message_reference\n\n if file is not MISSING and files is not MISSING:\n raise TypeError(\"Cannot pass both file and files\")\n if file is not MISSING:\n self.files = [file]\n\n if embed is not MISSING and embeds is not MISSING:\n raise TypeError(\"Cannot pass both embed and embeds\")\n if embed is not MISSING:\n if embed is None:\n self.embeds = []\n else:\n self.embeds = [embed]\n\n if attachment is not MISSING and attachments is not MISSING:\n raise TypeError(\"Cannot pass both attachment and attachments\")\n if attachment is not MISSING:\n if attachment is None:\n self.attachments = []\n else:\n self.attachments = [attachment]\n\n if self.view is not MISSING and self.view is None:\n self.view = View()\n\n if self.attachments is not MISSING:\n self.files = (\n [a for a in self.attachments if isinstance(a, File)]\n if self.attachments is not None else None\n )\n\n def to_dict(self, is_request: bool = False) -> dict:\n \"\"\"\n The JSON data that is sent to Discord.\n\n Parameters\n ----------\n is_request: `bool`\n Whether the data is being sent to Discord or not.\n\n Returns\n -------\n `dict`\n The JSON data that can either be sent\n to Discord or forwarded to a new parser\n \"\"\"\n output: dict[str, Any] = {\n \"flags\": (\n MessageFlags.ephemeral.value\n if self.ephemeral else 0\n )\n }\n\n if self.content is not MISSING:\n output[\"content\"] = self.content\n\n if self.tts:\n output[\"tts\"] = self.tts\n\n if self.message_reference is not MISSING:\n output[\"message_reference\"] = self.message_reference.to_dict()\n\n if self.embeds is not MISSING:\n output[\"embeds\"] = [\n embed.to_dict() for embed in self.embeds # type: ignore\n if isinstance(embed, Embed)\n ]\n\n if self.view is not MISSING:\n output[\"components\"] = self.view.to_dict()\n\n if self.allowed_mentions is not MISSING:\n output[\"allowed_mentions\"] = self.allowed_mentions.to_dict()\n\n if self.attachments is not MISSING:\n if self.attachments is None:\n output[\"attachments\"] = []\n else:\n _index = 0\n _file_payload = []\n for a in self.attachments:\n if not isinstance(a, File):\n continue\n _file_payload.append(a.to_dict(_index))\n _index += 1\n output[\"attachments\"] = _file_payload\n\n if is_request:\n return output\n return {\"type\": int(self.type), \"data\": output}\n\n def to_multipart(self, is_request: bool = False) -> bytes:\n \"\"\"\n The multipart data that is sent to Discord.\n\n Parameters\n ----------\n is_request: `bool`\n Whether the data is being sent to Discord or not.\n\n Returns\n -------\n `bytes`\n The multipart data that can either be sent\n \"\"\"\n multidata = MultipartData()\n\n if isinstance(self.files, list):\n for i, file in 
enumerate(self.files):\n multidata.attach(\n f\"files[{i}]\",\n file, filename=file.filename # type: ignore\n )\n\n multidata.attach(\n \"payload_json\",\n self.to_dict(is_request=is_request)\n )\n\n return multidata.finish()"
}
] | import asyncio
import logging
import signal
from datetime import datetime
from hypercorn.asyncio import serve
from hypercorn.config import Config as HyperConfig
from nacl.exceptions import BadSignatureError
from nacl.signing import VerifyKey
from quart import Quart, request, abort
from quart import Response as QuartResponse
from quart.logging import default_handler
from quart.utils import MustReloadError, restart
from typing import Optional, Any, Union, TYPE_CHECKING
from .commands import Command, Interaction, Listener, SubGroup
from .enums import InteractionType
from .errors import CheckFailed
from .response import BaseResponse, Ping, MessageResponse
from .client import Client
from .context import Context | 7,291 | def error_messages(
self,
ctx: "Context",
e: Exception
) -> Optional[MessageResponse]:
"""
Used to return error messages to Discord
Parameters
----------
ctx: `Context`
The context of the command
e: `Exception`
The exception that was raised
Returns
-------
`Optional[MessageResponse]`
The message response provided by the library error handler
"""
if isinstance(e, CheckFailed):
return ctx.response.send_message(
content=str(e),
ephemeral=True
)
def _dig_subcommand(
self,
cmd: Union[Command, SubGroup],
data: dict
) -> tuple[Optional[Command], list[dict]]:
""" Used to dig through subcommands to execute correct command/autocomplete """
data_options: list[dict] = data["data"].get("options", [])
while isinstance(cmd, SubGroup):
find_next_step = next((
g for g in data_options
if g.get("name", None) and not g.get("value", None)
), None)
if not find_next_step:
return abort(400, "invalid command")
cmd = cmd.subcommands.get(find_next_step["name"], None) # type: ignore
if not cmd:
_log.warn(
f"Unhandled subcommand: {find_next_step['name']} "
"(not found in local command list)"
)
return abort(404, "command not found")
data_options = find_next_step.get("options", [])
return cmd, data_options
async def _index_interaction(self) -> Union[BaseResponse, QuartResponse, dict]:
"""
The main function to handle all HTTP requests sent by Discord
Please do not touch this function, unless you know what you're doing
"""
await self.validate_request()
data = await request.json
if self.debug_events:
self.bot.dispatch("raw_interaction", data)
context = self.bot._context(self.bot, data)
data_type = data.get("type", -1)
match data_type:
case InteractionType.ping:
_ping = Ping(state=self.bot.state, data=data)
if self.bot.has_any_dispatch("ping"):
self.bot.dispatch("ping", _ping)
else:
_log.info(f"Discord HTTP Ping | {_ping}")
return context.response.pong()
case InteractionType.application_command:
_log.debug("Received slash command, processing...")
command_name = data["data"]["name"]
cmd = self.bot.commands.get(command_name)
if not cmd:
                    _log.warn(
                        f"Unhandled command: {command_name} "
"(not found in local command list)"
)
return QuartResponse(
"command not found",
status=404
)
cmd, data_options = self._dig_subcommand(cmd, data)
try:
payload = await cmd._make_context_and_run(
context=context
)
return QuartResponse(
payload.to_multipart(),
content_type=payload.content_type
)
except Exception as e:
if self.bot.has_any_dispatch("interaction_error"):
self.bot.dispatch("interaction_error", context, e)
else:
_log.error(
f"Error while running command {cmd.name}",
exc_info=e
)
_send_error = self.error_messages(context, e)
if _send_error and isinstance(_send_error, BaseResponse):
return _send_error.to_dict()
return abort(500)
case x if x in (
|
if TYPE_CHECKING:
_log = logging.getLogger(__name__)
__all__ = (
"DiscordHTTP",
)
def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
""" Used by Quart to cancel all tasks on shutdown. """
tasks = [
task for task in asyncio.all_tasks(loop)
if not task.done()
]
if not tasks:
return
for task in list(tasks):
task.cancel()
if task.get_coro().__name__ == "_windows_signal_support":
tasks.remove(task)
loop.run_until_complete(
asyncio.gather(*tasks, return_exceptions=True)
)
for task in tasks:
if not task.cancelled() and task.exception() is not None:
loop.call_exception_handler({
"message": "unhandled exception during shutdown",
"exception": task.exception(),
"task": task
})
class DiscordHTTP(Quart):
def __init__(self, *, client: "Client"):
"""
        This serves as the fundamental HTTP server for Discord Interactions
        We recommend not touching this class unless you know what you're doing
"""
self.uptime: datetime = datetime.now()
self.bot: "Client" = client
self.loop = self.bot.loop
self.debug_events = self.bot.debug_events
self._cog_commands: dict[str, Command] = {}
self._cog_interactions: dict[str, Interaction] = {}
self._cog_listeners: list[Listener] = []
super().__init__(__name__)
# Remove Quart's default logging handler
_quart_log = logging.getLogger("quart.app")
_quart_log.removeHandler(default_handler)
_quart_log.setLevel(logging.CRITICAL)
async def validate_request(self) -> None:
""" Used to validate requests sent by Discord Webhooks """
if not self.bot.public_key:
return abort(401, "invalid public key")
verify_key = VerifyKey(bytes.fromhex(self.bot.public_key))
signature: str = request.headers.get("X-Signature-Ed25519", "")
timestamp: str = request.headers.get("X-Signature-Timestamp", "")
try:
data = await request.data
body = data.decode("utf-8")
verify_key.verify(
f"{timestamp}{body}".encode(),
bytes.fromhex(signature)
)
except BadSignatureError:
abort(401, "invalid request signature")
except Exception:
abort(400, "invalid request body")
def error_messages(
self,
ctx: "Context",
e: Exception
) -> Optional[MessageResponse]:
"""
Used to return error messages to Discord
Parameters
----------
ctx: `Context`
The context of the command
e: `Exception`
The exception that was raised
Returns
-------
`Optional[MessageResponse]`
The message response provided by the library error handler
"""
if isinstance(e, CheckFailed):
return ctx.response.send_message(
content=str(e),
ephemeral=True
)
def _dig_subcommand(
self,
cmd: Union[Command, SubGroup],
data: dict
) -> tuple[Optional[Command], list[dict]]:
""" Used to dig through subcommands to execute correct command/autocomplete """
data_options: list[dict] = data["data"].get("options", [])
while isinstance(cmd, SubGroup):
find_next_step = next((
g for g in data_options
if g.get("name", None) and not g.get("value", None)
), None)
if not find_next_step:
return abort(400, "invalid command")
cmd = cmd.subcommands.get(find_next_step["name"], None) # type: ignore
if not cmd:
_log.warn(
f"Unhandled subcommand: {find_next_step['name']} "
"(not found in local command list)"
)
return abort(404, "command not found")
data_options = find_next_step.get("options", [])
return cmd, data_options
async def _index_interaction(self) -> Union[BaseResponse, QuartResponse, dict]:
"""
The main function to handle all HTTP requests sent by Discord
Please do not touch this function, unless you know what you're doing
"""
await self.validate_request()
data = await request.json
if self.debug_events:
self.bot.dispatch("raw_interaction", data)
context = self.bot._context(self.bot, data)
data_type = data.get("type", -1)
match data_type:
case InteractionType.ping:
_ping = Ping(state=self.bot.state, data=data)
if self.bot.has_any_dispatch("ping"):
self.bot.dispatch("ping", _ping)
else:
_log.info(f"Discord HTTP Ping | {_ping}")
return context.response.pong()
case InteractionType.application_command:
_log.debug("Received slash command, processing...")
command_name = data["data"]["name"]
cmd = self.bot.commands.get(command_name)
if not cmd:
                    _log.warn(
                        f"Unhandled command: {command_name} "
"(not found in local command list)"
)
return QuartResponse(
"command not found",
status=404
)
cmd, data_options = self._dig_subcommand(cmd, data)
try:
payload = await cmd._make_context_and_run(
context=context
)
return QuartResponse(
payload.to_multipart(),
content_type=payload.content_type
)
except Exception as e:
if self.bot.has_any_dispatch("interaction_error"):
self.bot.dispatch("interaction_error", context, e)
else:
_log.error(
f"Error while running command {cmd.name}",
exc_info=e
)
_send_error = self.error_messages(context, e)
if _send_error and isinstance(_send_error, BaseResponse):
return _send_error.to_dict()
return abort(500)
case x if x in ( | InteractionType.message_component, | 4 | 2023-11-14 12:50:42+00:00 | 12k |
newcastleuniversity/DISPEL | dispel/providers/mobilized/io.py | [
{
"identifier": "Evaluation",
"path": "dispel/data/core.py",
"snippet": "class Evaluation(Epoch):\n \"\"\"Evaluation information for a :class:`Reading`.\n\n The evaluation corresponds to the json related task, whereas the session corresponds\n to the group of tasks that the evaluation finds itself in.\n\n FIXME: DOC\n\n Attributes\n ----------\n uuid\n The unique unified identifier of the evaluation\n finished\n ``True`` if the concerned task has been finished normally. ``False`` otherwise.\n exit_reason\n The exit condition. It determines the type of interruption if the test was\n interrupted, as well as the reason for the end of the test if the test has\n been completed.\n user_id\n The identifier of the user\n \"\"\"\n\n def __init__(\n self,\n *args,\n uuid: str,\n finished: Optional[bool] = None,\n exit_reason: Optional[str] = None,\n user_id: Optional[str] = None,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n\n if self.is_incomplete:\n raise ValueError(\"Evaluation epoch must always be complete\")\n\n self.uuid = uuid\n self.finished = finished\n self.exit_reason = exit_reason\n self.user_id = user_id\n\n def to_dict(self):\n \"\"\"Retrieve values of evaluation as dictionary.\"\"\"\n return {\n \"evaluation_code\": str(self.id),\n \"start_date\": str(self.start),\n \"end_date\": str(self.end),\n \"uuid\": self.uuid,\n \"user_id\": self.user_id if self.user_id else \"\",\n \"is_finished\": self.finished if self.finished else \"\",\n \"exit_reason\": self.exit_reason if self.exit_reason else \"\",\n }"
},
{
"identifier": "Reading",
"path": "dispel/data/core.py",
"snippet": "class Reading(FlagMixIn):\n \"\"\"A data capture from an experiment.\n\n Attributes\n ----------\n evaluation\n The evaluation information for this reading\n session\n The session information for this reading\n measure_set\n A list of measures already processed on the device\n schema\n The schema of the reading\n date\n The time the reading was recorded\n device\n The device that captured the reading\n\n Parameters\n ----------\n evaluation\n The evaluation information for this reading\n session\n The session information for this reading\n levels\n An iterable of Level\n measure_set\n A list of measures already processed on the device\n schema\n The schema of the reading\n date\n The time the reading was recorded\n device\n The device that captured the reading\n \"\"\"\n\n def __init__(\n self,\n evaluation: Evaluation,\n session: Optional[Session] = None,\n levels: Optional[Iterable[Level]] = None,\n measure_set: Optional[MeasureSet] = None,\n schema: Optional[ReadingSchema] = None,\n date: Any = None,\n device: Optional[Device] = None,\n ):\n super().__init__()\n self.evaluation = evaluation\n self.session = session\n self.measure_set: MeasureSet = measure_set or MeasureSet()\n self.schema = schema\n self.date = pd.Timestamp(date) if date else None\n self.device = device\n self._attempt: Dict[str, int] = defaultdict(int)\n\n # verify time frame compatibility\n if (\n self.session\n and not self.session.is_incomplete\n and not self.session.contains(self.evaluation)\n ):\n raise ValueError(\"Evaluation start and end must be within session\")\n\n # create dictionary of levels\n self._levels: Dict[LevelId, Level] = {}\n\n # set level if arg is provided\n if levels:\n for level in levels:\n self.set(level)\n\n def get_level(self, level_id: Optional[LevelIdType] = None) -> Level:\n \"\"\"Get level for a given level_id.\n\n Parameters\n ----------\n level_id\n The id identifying the level.\n\n Returns\n -------\n Level\n The level identified by ``level_id``. If no level id is provided and the\n reading contains only one level it will be returned. 
Otherwise, the function\n will raise a :class:`ValueError`.\n\n Raises\n ------\n ValueError\n If the given id does not match any existing level within the reading.\n ValueError\n If no id has been provided, and there are multiple levels withing the\n reading.\n \"\"\"\n # check if an arg is provided\n if level_id:\n if isinstance(level_id, str):\n level_id = LevelId.from_str(level_id) # type: ignore\n # check that this is a correct id\n if level_id not in self._levels:\n raise ValueError(\n f\"{level_id=} does not match any Level in {self._levels.keys()}\"\n )\n return self._levels[level_id] # type: ignore\n\n # if no level_id provided, check if there is only one level\n if len(self._levels) == 1:\n return next(iter(self._levels.values()))\n\n # if not, ask user for a level_id\n raise ValueError(\n f\"There are {len(self._levels)} levels, please provide a level_id in\"\n f\" {self._levels.keys()}\"\n )\n\n def __repr__(self) -> str:\n return f'<Reading: {plural(\"level\", len(self))} ({self.flag_count_repr})>'\n\n def __iter__(self) -> Iterable[Tuple[LevelIdType, Level]]:\n yield from self._levels.items()\n\n def __len__(self) -> int:\n return len(self._levels)\n\n @property\n def empty(self) -> bool:\n \"\"\"Check whether the reading is empty.\"\"\"\n return len(self) == 0\n\n @property\n def levels(self) -> ValuesView[Level]:\n \"\"\"Get a list of all Level in the reading.\"\"\"\n return self._levels.values()\n\n @property\n def level_ids(self) -> List[LevelId]:\n \"\"\"Get the list of level_id keys.\"\"\"\n return [level.id for level in self._levels.values()]\n\n def has_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> bool:\n \"\"\"Check whether the reading contains the desired raw data set.\n\n Parameters\n ----------\n data_set_id\n The id of the raw data set that will be searched for.\n level_id\n The level id in which the raw data set is to searched for.\n\n Returns\n -------\n bool\n ``True`` if the raw data set exists inside the given level. 
``False``\n otherwise.\n \"\"\"\n return self.get_level(level_id).has_raw_data_set(data_set_id)\n\n def get_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id and a level.\n\n Parameters\n ----------\n data_set_id\n The id of the raw data set that will be retrieved.\n level_id\n The level id from which the raw data set is to retrieved.\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id.\n \"\"\"\n return self.get_level(level_id).get_raw_data_set(data_set_id)\n\n def get_measure_set(self, level_id: Optional[LevelIdType] = None) -> MeasureSet:\n \"\"\"Get measure_set from level identified with level_id.\"\"\"\n if not level_id:\n return self.measure_set\n return self.get_level(level_id).measure_set\n\n def get_merged_measure_set(self) -> MeasureSet:\n \"\"\"Get a measure set containing all the reading's measure values.\"\"\"\n return sum(\n (self.measure_set, *(level.measure_set for level in self.levels)),\n MeasureSet(),\n )\n\n @singledispatchmethod\n def set(self, value, **kwargs):\n \"\"\"Set a value inside a reading.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n def _get_level(self, level: Optional[Union[LevelIdType, Level]] = None) -> Level:\n \"\"\"Get level from id or level itself.\"\"\"\n if isinstance(level, Level):\n return level\n return self.get_level(level)\n\n @set.register(MeasureSet)\n def _measure_set(\n self,\n value: MeasureSet,\n level: Optional[Union[LevelIdType, Level]] = None,\n ):\n if level is None:\n self.measure_set += value\n else:\n self._get_level(level).set(value)\n\n @set.register(MeasureValue)\n def _measure_value(\n self,\n value: MeasureValue,\n level: Optional[Union[LevelIdType, Level]] = None,\n epoch: Optional[LevelEpoch] = None,\n ):\n if epoch is not None:\n epoch.set(value)\n else:\n if level is None:\n measure_set = self.measure_set\n else:\n measure_set = self._get_level(level).measure_set\n\n measure_set.set(value)\n\n @set.register(RawDataSet)\n def _raw_data_set(\n self,\n value: RawDataSet,\n level: Union[LevelIdType, Level],\n concatenate: bool = False,\n overwrite: bool = False,\n ):\n self._get_level(level).set(value, concatenate=concatenate, overwrite=overwrite)\n\n @set.register(LevelEpoch)\n def _epoch_measure_set(self, value: LevelEpoch, level: Union[LevelIdType, Level]):\n self._get_level(level).set(value)\n\n @set.register(Level)\n def _level(self, value: Level):\n \"\"\"Set a level.\"\"\"\n level_id_str = str(value.id)\n for lev in self._levels:\n if str(lev).startswith(level_id_str) and level_id_str in self._attempt:\n self._attempt[level_id_str] += 1\n break\n if level_id_str not in self._attempt:\n new_level = LevelId.from_str(level_id_str)\n self._levels[new_level] = value # type: ignore\n self._attempt[str(new_level.id)] = 1\n else:\n new_level_id_str = \"-\".join(\n [level_id_str, str(self._attempt[level_id_str]).zfill(2)]\n )\n value.id = cast(LevelId, LevelId.from_str(new_level_id_str))\n self._levels[value.id] = value\n # TODO: use sorting by effective time frame to ensure orders to\n # attempts :\n # level_ids = sorted(level_ids, key=lambda x:\n # reading.get_level(x).effective_time_frame.start )\n self._levels[value.id].context.set(\n value=self._attempt[level_id_str],\n definition=ValueDefinition(\n id_=\"attempt\", name=f\"The attempt number: {self._attempt[level_id_str]}\"\n ),\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)"
},
{
"identifier": "EpochDefinition",
"path": "dispel/data/epochs.py",
"snippet": "class EpochDefinition:\n \"\"\"The definition of an epoch.\n\n Parameters\n ----------\n id_\n The identifier of the epoch. This identifier does not have to be unique\n across multiple epochs and can serve as a type of epoch.\n name\n An optional plain-text name of the epoch definition.\n description\n A detailed description of the epoch providing additional resolution beyond\n the ``name`` property.\n\n Attributes\n ----------\n name\n An optional plain-text name of the epoch definition.\n description\n A detailed description of the epoch providing additional resolution beyond\n the ``name`` property.\n \"\"\"\n\n def __init__(\n self,\n id_: Union[str, DefinitionId],\n name: Optional[str] = None,\n description: Optional[str] = None,\n ):\n self.id = id_ # type: ignore\n self.name = name\n self.description = description\n\n @property\n def id(self) -> DefinitionId:\n \"\"\"Get the ID of the definition.\n\n Returns\n -------\n DefinitionId\n The ID of the epoch definition.\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, value: Union[str, DefinitionId]):\n \"\"\"Set the ID of the definition.\n\n Parameters\n ----------\n value\n The ID of the definition. The ID has to be unique with respect to the\n time points of the :class:`Epoch`, i.e., if an epoch has the same ID,\n start, and end, it is considered equal.\n \"\"\"\n if not isinstance(value, DefinitionId):\n value = DefinitionId(value)\n self._id = value"
},
{
"identifier": "Context",
"path": "dispel/data/levels.py",
"snippet": "class Context(ValueSet):\n \"\"\"Contextual information for a level.\"\"\""
},
{
"identifier": "Level",
"path": "dispel/data/levels.py",
"snippet": "class Level(Epoch):\n \"\"\"An entity to separate sub-task inside each test (Levels).\n\n FIXME: DOC\n\n Attributes\n ----------\n context\n Contextual information about the level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n\n Parameters\n ----------\n id_\n The identifier of a given Level.\n start\n The timestamp of the beginning of the level\n end\n The timestamp of the end of the level\n context\n Contextual information about the level\n raw_data_sets\n An iterable of :class:'~dispel.data.raw.RawDataSet' of a given Level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n epochs\n An iterable of :class:`~dispel.data.measures.EpochMeasureSet` to be added to the\n level.\n \"\"\"\n\n def __init__(\n self,\n id_: Union[str, List[str], LevelId],\n start: Any,\n end: Any,\n context: Optional[Context] = None,\n raw_data_sets: Optional[Iterable[RawDataSet]] = None,\n measure_set: Optional[MeasureSet] = None,\n epochs: Optional[Iterable[LevelEpoch]] = None,\n ):\n if not isinstance(id_, LevelId):\n id_ = LevelId(id_)\n\n definition = EpochDefinition(id_=id_)\n super().__init__(start=start, end=end, definition=definition)\n\n self.context = context or Context()\n self.measure_set = measure_set or MeasureSet()\n\n # create dictionary of raw data sets\n self._raw_data_sets: Dict[str, RawDataSet] = {}\n\n # set raw data sets if arg is provided\n if raw_data_sets:\n for raw_data_set in raw_data_sets:\n self.set(raw_data_set)\n\n # create data frame for each epoch\n self._epochs = pd.DataFrame(columns=[\"definition_id\", \"start\", \"end\", \"epoch\"])\n if epochs:\n for epoch in epochs:\n self.set(epoch)\n\n @property\n def id(self) -> LevelId:\n \"\"\"Get the ID of the level from its definition.\n\n Returns\n -------\n LevelId\n The ID of the definition provided via `definition`.\n \"\"\"\n assert self.definition is not None, \"Require definition to access id\"\n return cast(LevelId, self.definition.id)\n\n @id.setter\n def id(self, value: Union[str, DefinitionId]):\n \"\"\"Set the ID of the level's definition.\n\n Parameters\n ----------\n value\n The ID to be set.\n \"\"\"\n assert self.definition is not None, \"Require definition to set id\"\n self.definition.id = value # type: ignore\n\n def __hash__(self):\n return hash(self.id)\n\n def __repr__(self):\n return f\"<Level: {self.id} ({self.flag_count_repr})>\"\n\n @property\n def raw_data_sets(self) -> List[RawDataSet]:\n \"\"\"Get all raw data sets.\"\"\"\n return list(self._raw_data_sets.values())\n\n def has_raw_data_set(self, id_: str) -> bool:\n \"\"\"Return ``True`` if the level contains the desired raw data set.\"\"\"\n return id_ in self._raw_data_sets\n\n def get_raw_data_set(self, id_: str) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id.\n\n Parameters\n ----------\n id_\n The id of the raw data set to be returned\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id\n\n Raises\n ------\n ValueError\n If the given id does not correspond to any existing raw data set within the\n level.\n \"\"\"\n if id_ not in self._raw_data_sets:\n raise ValueError(\n f'Unknown data set with id: \"{id_}\" for level_id == \"{self.id}\" '\n f\"please provide an id within {list(self._raw_data_sets.keys())}\"\n )\n\n return self._raw_data_sets[id_]\n\n @property\n def epochs(self) -> List[LevelEpoch]:\n \"\"\"Get all epoch measure sets.\"\"\"\n return self._epochs[\"epoch\"].tolist()\n\n @singledispatchmethod\n def set(self, value, 
**kwargs):\n \"\"\"Set a value inside a level.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n @set.register(MeasureSet)\n def _set_measure_set(self, value: MeasureSet):\n self.measure_set += value\n\n @set.register(MeasureValue)\n def _set_measure_value(self, value: MeasureValue):\n self.measure_set.set(value)\n\n @set.register(RawDataSet)\n def _set_raw_data_set(\n self, value: RawDataSet, concatenate: bool = False, overwrite: bool = False\n ):\n if overwrite and concatenate:\n raise ValueError(\n \"You cannot both concatenate and overwrite an existing raw data set. \"\n \"Only one of these arguments must be set to ``True``.\"\n )\n\n if (id_ := value.id) in self._raw_data_sets: # pylint: disable=all\n if concatenate:\n value = value.concat(self.get_raw_data_set(id_))\n elif not overwrite:\n raise RawDataSetAlreadyExists(\n id_, self.id, \"Use overwrite=True to overwrite\"\n )\n\n self._raw_data_sets[id_] = value\n\n @set.register(LevelEpoch)\n def _set_epoch(self, value: LevelEpoch):\n new_index = len(self._epochs)\n self._epochs.loc[new_index] = pd.Series(\n dict(\n definition_id=value.id if value.definition else None,\n start=value.start,\n end=value.end,\n epoch=value,\n )\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)"
},
{
"identifier": "RawDataSet",
"path": "dispel/data/raw.py",
"snippet": "class RawDataSet(FlagMixIn):\n \"\"\"A raw data set.\n\n Parameters\n ----------\n definition\n The definition of the raw data set\n data\n The data set\n \"\"\"\n\n def __init__(self, definition: RawDataSetDefinition, data: pd.DataFrame):\n super().__init__()\n self.definition = definition\n self.data = data\n\n precision_exists = any(\n [d.precision is not None for d in self.definition.value_definitions]\n )\n if precision_exists:\n # if precision exists then store the original data prior to any rounding\n self.raw_data = data\n\n def_ids = {d.id for d in self.definition.value_definitions if not d.is_index}\n data_ids = set(data.columns)\n\n diff_data_columns = data_ids - def_ids\n if diff_data_columns:\n raise ValueError(f\"Missing definition for column(s): {diff_data_columns}\")\n\n diff_def_ids = def_ids - data_ids\n if diff_def_ids:\n raise ValueError(f\"Missing columns for definition(s): {diff_def_ids}\")\n\n # for each column definition check if precision exists and apply it to the data\n for col_def in self.definition.value_definitions:\n if col_def.precision is not None:\n self.data[col_def.id.id] = round(\n self.data[col_def.id.id], ndigits=col_def.precision\n )\n\n @property\n def id(self) -> str:\n \"\"\"Get the identifier from the definition of the raw data set.\"\"\"\n return self.definition.id\n\n def __repr__(self) -> str:\n return f\"<RawDataSet: {self.id} ({self.flag_count_repr})>\"\n\n def concat(self, other: \"RawDataSet\") -> \"RawDataSet\":\n \"\"\"Concatenate two raw data sets.\"\"\"\n if self.definition != other.definition:\n raise ValueError(\"Can only concatenate data sets with equal definitions\")\n return RawDataSet(self.definition, pd.concat([self.data, other.data]))"
},
{
"identifier": "RawDataSetDefinition",
"path": "dispel/data/raw.py",
"snippet": "class RawDataSetDefinition:\n \"\"\"The definition of a raw data set.\"\"\"\n\n #: The identifier of the raw data set definition\n id: str\n #: The source of the raw data set\n source: RawDataSetSource\n value_definitions_list: InitVar[Iterable[RawDataValueDefinition]]\n is_computed: bool = False\n \"\"\"`True` if the raw data source is computed. ``False`` if it is a measured\n source without transformation, e.g. acceleration recorded from the low\n level APIs.\"\"\"\n _value_definitions: Dict[DefinitionId, ValueDefinition] = field(init=False)\n\n def __post_init__(self, value_definitions_list):\n self._value_definitions = _create_value_definition_dict(value_definitions_list)\n\n @property\n def value_definitions(self):\n \"\"\"Get the value definitions of the raw data set.\"\"\"\n return self._value_definitions.values()\n\n def get_value_definition(self, id_: DefinitionId):\n \"\"\"Get a value definition.\"\"\"\n return self._value_definitions[id_]\n\n def __hash__(self):\n return hash(self.id)\n\n def __eq__(self, other):\n return (\n isinstance(other, RawDataSetDefinition)\n and self.id == other.id\n and self.source == other.source\n and eq(set(self.value_definitions), set(other.value_definitions))\n and self.is_computed == other.is_computed\n )"
},
{
"identifier": "RawDataSetSource",
"path": "dispel/data/raw.py",
"snippet": "class RawDataSetSource:\n \"\"\"The source of a raw data set.\"\"\"\n\n #: The manufacturer producing the raw data set source\n manufacturer: str"
},
{
"identifier": "RawDataValueDefinition",
"path": "dispel/data/raw.py",
"snippet": "class RawDataValueDefinition(ValueDefinition):\n \"\"\"The definition of raw data set values.\n\n Attributes\n ----------\n is_index\n ``True`` if the values are part of the raw data set index. Otherwise, ``False``.\n \"\"\"\n\n def __init__(\n self,\n id_: str,\n name: str,\n unit: Optional[str] = None,\n description: Optional[str] = None,\n data_type: Optional[str] = None,\n precision: Optional[int] = None,\n is_index: bool = False,\n ):\n super().__init__(\n id_=id_,\n name=name,\n unit=unit,\n description=description,\n data_type=data_type,\n precision=precision,\n )\n self.is_index = is_index"
},
{
"identifier": "Value",
"path": "dispel/data/values.py",
"snippet": "class Value:\n \"\"\"A value with definition and actual value.\n\n Parameters\n ----------\n definition\n The definition of the value.\n value\n The actual value. If `definition.precision` is set, then the value will be\n rounded to the number of significant digits. The pre-rounded value is stored in\n `raw_value`.\n \"\"\"\n\n def __init__(self, definition: ValueDefinition, value: Any):\n if not isinstance(definition, ValueDefinition):\n raise ValueError(\"Definition must be an instance of ValueDefinition\")\n\n self.definition = definition\n\n # store original raw value before precision rounding\n self.raw_value = value\n\n if definition.precision is not None:\n value = round(value, ndigits=definition.precision)\n\n self.value = value\n\n # validate value if validator is present\n if self.definition.validator:\n try:\n self.definition.validator(self.value)\n except ValidationException as exc:\n raise ValueError(\n f\"Provided value is not valid for {self.definition}: {exc}\"\n ) from exc\n\n @property\n def id(self) -> DefinitionId:\n \"\"\"Get the identifier from the definition of the value.\"\"\"\n return self.definition.id\n\n def __repr__(self):\n return f\"<{self.__class__.__name__} ({self.definition}): {self.value}>\"\n\n def __hash__(self):\n return hash((self.definition, self.value))\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return hash(self) == hash(other)\n\n return False"
},
{
"identifier": "ValueDefinition",
"path": "dispel/data/values.py",
"snippet": "class ValueDefinition:\n \"\"\"The definition of a value.\n\n Parameters\n ----------\n id_\n The identifier of the value definition\n name\n The human-readable name of the values\n unit\n The unit of the value\n description\n A more elaborate description of the values and how they were produced\n data_type\n The numpy data type of the value in question\n validator\n A function that ensures values comply with the definition. The module\n :mod:`~dispel.data.validators` contains validators for common scenarios that can be\n used here.\n precision\n The number of significance for the values expected under definition. If set, the\n value will be rounded to the set number of digits.\n \"\"\"\n\n def __init__(\n self,\n id_: DefinitionIdType,\n name: str,\n unit: Optional[str] = None,\n description: Optional[str] = None,\n data_type: Optional[str] = None,\n validator: Optional[Callable[[Any], None]] = None,\n precision: Optional[int] = None,\n ):\n if isinstance(id_, str):\n id_ = DefinitionId.from_str(id_)\n\n self.id = id_ # pylint: disable=C0103\n self.name = name\n self.unit = unit\n self.description = description\n self.data_type = data_type\n\n # Verify that the validator is Callable\n if validator and not callable(validator):\n raise TypeError(f\"The {id_} measure validator is not Callable.\")\n\n self.validator = validator\n self.precision = precision\n\n def __repr__(self):\n unit_extra = f\", {self.unit}\" if self.unit else \"\"\n return f\"<{self.__class__.__name__}: {self.id} \" f\"({self.name}{unit_extra})>\"\n\n def __hash__(self):\n # TODO: make properties read-only\n return hash(\n (\n self.id,\n self.name,\n self.unit,\n self.description,\n self.validator,\n self.data_type,\n )\n )\n\n def __eq__(self, other):\n if isinstance(other, ValueDefinition):\n return hash(self) == hash(other)\n return False\n\n @classmethod\n def _get_parameters(cls) -> Set[str]:\n params = set(inspect.signature(cls.__init__).parameters.keys())\n params.remove(\"self\")\n return params\n\n def _to_dict(self) -> Dict[str, Any]:\n \"\"\"Turn instance into dict with values from constructor.\"\"\"\n\n def _getattr(name):\n if name == \"id_\":\n return self.id\n return getattr(self, name)\n\n return {name: _getattr(name) for name in self._get_parameters()}\n\n def derive(self, **kwargs) -> \"ValueDefinition\":\n \"\"\"Derive a value definition with updated properties.\n\n Parameters\n ----------\n kwargs\n Keyword arguments to be set/updated in the derived definition.\n\n Returns\n -------\n ValueDefinition\n A new definition with updated parameters.\n\n Raises\n ------\n ValueError\n If one of the provided arguments is not a parameter of the constructor.\n \"\"\"\n diff = set(kwargs.keys()).difference(self._get_parameters())\n if diff:\n raise ValueError(\n f\"The following parameters are unknown to the constructor: \"\n f'{\", \".join(sorted(diff))}'\n )\n\n new_kwargs = self._to_dict()\n new_kwargs.update(kwargs)\n return self.__class__(**new_kwargs)"
}
] | from collections.abc import MutableMapping
from typing import Dict, Tuple
from dispel.data.core import Evaluation, Reading
from dispel.data.epochs import EpochDefinition
from dispel.data.levels import Context, Level
from dispel.data.raw import (
RawDataSet,
RawDataSetDefinition,
RawDataSetSource,
RawDataValueDefinition,
)
from dispel.data.values import Value, ValueDefinition
import numpy as np
import pandas as pd
import scipy.io | 8,519 | data_to_cat = [
*[sub_dict[sensor_location][sensor_uni] for sensor_uni in set_sensors_uni],
*[sub_dict[sensor_location][sensor_xyz] for sensor_xyz in set_sensors_xyz],
]
columns = [
*[sensor_uni for sensor_uni in set_sensors_uni],
*[
f"{sensor_xyz}_{axis}"
for sensor_xyz in set_sensors_xyz
for axis in ["x", "y", "z"]
],
]
for remaining_measures in SET_REMAINING_MEASURES:
incl_remaining = remaining_measures in set_sensors
if incl_remaining:
data_to_cat.append(sub_dict[sensor_location][remaining_measures])
columns += [
f"{remaining_measures}_{n}"
for n in range(sub_dict[sensor_location][remaining_measures].shape[1])
]
df = pd.DataFrame(np.concatenate(data_to_cat, axis=1), columns=columns)
return df
def pre_formatting_yar(dict_mat: Dict) -> Tuple[str, Dict]:
    """Pre-format a YAR file."""
# Instantiate the data dictionary to use to create the reading
data_t1 = dict_mat["data"]["TimeMeasure1"]
# Give a name to the source here we choose YAR
source = "YAR"
return source, data_t1
def parse_mobilized_yar(path: str, verbose: bool = True) -> Reading:
    """Create a reading from a Mobilize-D .mat YAR file."""
# Read the .mat file
dict_mat = read_matlab_file(path)
# Instantiate the reading start and end, they will be updated with recording min
# and max timestamps
reading_start = np.nan
reading_end = np.nan
# Instantiate the data dictionary and source
source, data_t1 = pre_formatting_yar(dict_mat)
# Instantiate an empty list of levels
list_level = []
# Go through the recordings
for it_level, (level_name, recording) in enumerate(data_t1.items()):
# Instantiate required variables
start = np.nan
end = np.nan
context = {}
raw_data_sets = []
if verbose:
print("___________")
print(f"Reading Level {level_name}")
# Go through assessments in the recording
for assessment, item in recording.items():
            # If variables are contextual, add them to the context
if assessment in RECORDING_CONTEXT_KEYS:
context[assessment] = item.squeeze()
continue
# Else create a new level in the context to store information linked to
# the assessment
context[assessment] = {}
if verbose:
print("- - - - -")
print(f"{level_name}: assessment {assessment}")
# Specific case of Standards (here it is not about Acc, Gyr, Mag but
# pressure)
if assessment == "Standards":
# Go through the sources
for source in item.keys():
if verbose:
print(
f"{level_name}: assessment {assessment} - source {source}"
)
# Create a sub_dict at the level of the source
sub_dict = data_t1[level_name][assessment][source]
# create a new level in the context to store information linked
# to the source
context[assessment][source] = {}
# Usual case
if source != "INDIP":
# Go through sensor locations
for sensor_location in sub_dict.keys():
if verbose:
print(
f"{level_name}: assessment {assessment} - "
f"source {source} - "
f"sensor_location {sensor_location}"
)
# Storing contextual sensor frequency information
context[assessment][source][sensor_location] = {}
context[assessment][source][sensor_location][
"Fs"
] = sub_dict[sensor_location]["Fs"]
# Create a dataframe out of the source and sensor location
df = df_from_source(sub_dict, sensor_location)
# Create an identifier for the dataset
dataset_id = f"{assessment}-{source}-{sensor_location}"
# Create the definitions
definitions = [
| """Functionality to read Mobilize-D YAR files."""
# Define required CONSTANTS
RECORDING_CONTEXT_KEYS = {"StartDateTime", "TimeZone"}
SET_META_INFO = {"Fs", "Presence"}
SET_UNICOLUMN = {"Timestamp", "Bar"}
SET_REMAINING_MEASURES = {"Distance", "NormalizedPressure"}
SET_XYZ = {"Acc", "Gyr", "Mag"}
# Functions to read and unwrap matlab yar files
def unwrap_mat(data: np.ndarray):
"""Unwrap array generated by scipy load mat."""
if data.dtype.names is None:
return data
return {n: unwrap_mat(data[n][0, 0]) for n in data.dtype.names}
def read_matlab_file(path: str) -> dict:
"""Format matlab file to a dictionary."""
if not path.endswith(".mat"):
raise NotImplementedError("Only .mat files are supported.", path)
mat = scipy.io.loadmat(path)
res = {
"__header__": mat["__header__"],
"__version__": mat["__version__"],
"__globals__": mat["__globals__"],
"data": unwrap_mat(mat["data"]),
}
return res
# Function to create a Context from dictionary format
def flatten(dictionary, parent_key="", separator="_"):
"""Flatten a dictionary."""
items = []
for key, value in dictionary.items():
new_key = parent_key + separator + key if parent_key else key
if isinstance(value, MutableMapping):
items.extend(flatten(value, new_key, separator=separator).items())
else:
items.append((new_key, value))
return dict(items)
def parse_context(context: Dict) -> Context:
"""Parse the context information available.
Parameters
----------
context
A dictionary extracted from a mobilize-D .mat file
Returns
-------
Context
The context representation of the passed ``data``.
"""
values = [Value(ValueDefinition(item, item), context[item]) for item in context]
return Context(values)
def context_from_dict(dictionary: Dict) -> Context:
"""Flatten and cast to dictionary."""
return parse_context(flatten(dictionary, separator="."))
def df_from_source(sub_dict, sensor_location):
"""Create a dataframe from a nested dictionary and a sensor location."""
# Define sensor with multiple columns
set_sensors = set(sub_dict[sensor_location].keys()) - SET_META_INFO
set_sensors_uni = set_sensors & SET_UNICOLUMN
set_sensors_xyz = set_sensors & SET_XYZ
data_to_cat = [
*[sub_dict[sensor_location][sensor_uni] for sensor_uni in set_sensors_uni],
*[sub_dict[sensor_location][sensor_xyz] for sensor_xyz in set_sensors_xyz],
]
columns = [
*[sensor_uni for sensor_uni in set_sensors_uni],
*[
f"{sensor_xyz}_{axis}"
for sensor_xyz in set_sensors_xyz
for axis in ["x", "y", "z"]
],
]
for remaining_measures in SET_REMAINING_MEASURES:
incl_remaining = remaining_measures in set_sensors
if incl_remaining:
data_to_cat.append(sub_dict[sensor_location][remaining_measures])
columns += [
f"{remaining_measures}_{n}"
for n in range(sub_dict[sensor_location][remaining_measures].shape[1])
]
df = pd.DataFrame(np.concatenate(data_to_cat, axis=1), columns=columns)
return df
def pre_formatting_yar(dict_mat: Dict) -> Tuple[str, Dict]:
    """Pre-format a YAR file."""
# Instantiate the data dictionary to use to create the reading
data_t1 = dict_mat["data"]["TimeMeasure1"]
# Give a name to the source here we choose YAR
source = "YAR"
return source, data_t1
def parse_mobilized_yar(path: str, verbose: bool = True) -> Reading:
    """Create a reading from a Mobilize-D .mat YAR file."""
# Read the .mat file
dict_mat = read_matlab_file(path)
# Instantiate the reading start and end, they will be updated with recording min
# and max timestamps
reading_start = np.nan
reading_end = np.nan
# Instantiate the data dictionary and source
source, data_t1 = pre_formatting_yar(dict_mat)
# Instantiate an empty list of levels
list_level = []
# Go through the recordings
for it_level, (level_name, recording) in enumerate(data_t1.items()):
# Instantiate required variables
start = np.nan
end = np.nan
context = {}
raw_data_sets = []
if verbose:
print("___________")
print(f"Reading Level {level_name}")
# Go through assessments in the recording
for assessment, item in recording.items():
            # If variables are contextual, add them to the context
if assessment in RECORDING_CONTEXT_KEYS:
context[assessment] = item.squeeze()
continue
# Else create a new level in the context to store information linked to
# the assessment
context[assessment] = {}
if verbose:
print("- - - - -")
print(f"{level_name}: assessment {assessment}")
# Specific case of Standards (here it is not about Acc, Gyr, Mag but
# pressure)
if assessment == "Standards":
# Go through the sources
for source in item.keys():
if verbose:
print(
f"{level_name}: assessment {assessment} - source {source}"
)
# Create a sub_dict at the level of the source
sub_dict = data_t1[level_name][assessment][source]
# create a new level in the context to store information linked
# to the source
context[assessment][source] = {}
# Usual case
if source != "INDIP":
# Go through sensor locations
for sensor_location in sub_dict.keys():
if verbose:
print(
f"{level_name}: assessment {assessment} - "
f"source {source} - "
f"sensor_location {sensor_location}"
)
# Storing contextual sensor frequency information
context[assessment][source][sensor_location] = {}
context[assessment][source][sensor_location][
"Fs"
] = sub_dict[sensor_location]["Fs"]
# Create a dataframe out of the source and sensor location
df = df_from_source(sub_dict, sensor_location)
# Create an identifier for the dataset
dataset_id = f"{assessment}-{source}-{sensor_location}"
# Create the definitions
definitions = [ | RawDataValueDefinition(column, column.upper()) | 8 | 2023-11-14 10:06:46+00:00 | 12k |
NevermindNilas/TheAnimeScripter | src/segment/train.py | [
{
"identifier": "InSPyReNet",
"path": "src/segment/model/inspyrenet.py",
"snippet": "class InSPyReNet(nn.Module):\n def __init__(\n self,\n backbone,\n in_channels,\n depth=64,\n base_size=(384, 384),\n threshold: Optional[int] = 512,\n **kwargs,\n ):\n super(InSPyReNet, self).__init__()\n self.backbone = backbone\n self.in_channels = in_channels\n self.depth = depth\n self.base_size = base_size\n self.threshold = threshold\n\n self.context1 = PAA_e(\n self.in_channels[0], self.depth, base_size=self.base_size, stage=0\n )\n self.context2 = PAA_e(\n self.in_channels[1], self.depth, base_size=self.base_size, stage=1\n )\n self.context3 = PAA_e(\n self.in_channels[2], self.depth, base_size=self.base_size, stage=2\n )\n self.context4 = PAA_e(\n self.in_channels[3], self.depth, base_size=self.base_size, stage=3\n )\n self.context5 = PAA_e(\n self.in_channels[4], self.depth, base_size=self.base_size, stage=4\n )\n\n self.decoder = PAA_d(\n self.depth * 3, depth=self.depth, base_size=base_size, stage=2\n )\n\n self.attention0 = SICA(\n self.depth,\n depth=self.depth,\n base_size=self.base_size,\n stage=0,\n lmap_in=True,\n )\n self.attention1 = SICA(\n self.depth * 2,\n depth=self.depth,\n base_size=self.base_size,\n stage=1,\n lmap_in=True,\n )\n self.attention2 = SICA(\n self.depth * 2, depth=self.depth, base_size=self.base_size, stage=2\n )\n\n self.sod_loss_fn = lambda x, y: weighted_bce_loss_with_logits(\n x, y, reduction=\"mean\"\n ) + iou_loss_with_logits(x, y, reduction=\"mean\")\n self.pc_loss_fn = nn.L1Loss()\n\n self.ret = lambda x, target: F.interpolate(\n x, size=target.shape[-2:], mode=\"bilinear\", align_corners=False\n )\n self.res = lambda x, size: F.interpolate(\n x, size=size, mode=\"bilinear\", align_corners=False\n )\n self.des = lambda x, size: F.interpolate(x, size=size, mode=\"nearest\")\n\n self.image_pyramid = ImagePyramid(7, 1)\n\n self.transition0 = Transition(17)\n self.transition1 = Transition(9)\n self.transition2 = Transition(5)\n\n self.forward = self.forward_inference\n\n def _apply(self, fn):\n super(InSPyReNet, self)._apply(fn)\n self.image_pyramid._apply(fn)\n self.transition0._apply(fn)\n self.transition1._apply(fn)\n self.transition2._apply(fn)\n return self\n\n def train(self, mode=True):\n super(InSPyReNet, self).train(mode)\n self.forward = self.forward_train if mode else self.forward_inference\n return self\n\n def forward_inspyre(self, x):\n B, _, H, W = x.shape\n\n x1, x2, x3, x4, x5 = self.backbone(x)\n\n x1 = self.context1(x1) # 4\n x2 = self.context2(x2) # 4\n x3 = self.context3(x3) # 8\n x4 = self.context4(x4) # 16\n x5 = self.context5(x5) # 32\n\n f3, d3 = self.decoder([x3, x4, x5]) # 16\n f3 = self.res(f3, (H // 4, W // 4))\n f2, p2 = self.attention2(torch.cat([x2, f3], dim=1), d3.detach())\n\n d2 = self.image_pyramid.reconstruct(d3.detach(), p2) # 4\n\n x1 = self.res(x1, (H // 2, W // 2))\n f2 = self.res(f2, (H // 2, W // 2))\n\n f1, p1 = self.attention1(\n torch.cat([x1, f2], dim=1), d2.detach(), p2.detach()\n ) # 2\n d1 = self.image_pyramid.reconstruct(d2.detach(), p1) # 2\n\n f1 = self.res(f1, (H, W))\n _, p0 = self.attention0(f1, d1.detach(), p1.detach()) # 2\n d0 = self.image_pyramid.reconstruct(d1.detach(), p0) # 2\n\n out = dict()\n out[\"saliency\"] = [d3, d2, d1, d0]\n out[\"laplacian\"] = [p2, p1, p0]\n\n return out\n\n def forward_train(self, x, y):\n B, _, H, W = x.shape\n out = self.forward_inspyre(x)\n\n d3, d2, d1, d0 = out[\"saliency\"]\n p2, p1, p0 = out[\"laplacian\"]\n\n y1 = self.image_pyramid.reduce(y)\n y2 = self.image_pyramid.reduce(y1)\n y3 = self.image_pyramid.reduce(y2)\n\n loss = (\n 
self.pc_loss_fn(\n self.des(d3, (H, W)),\n self.des(self.image_pyramid.reduce(d2), (H, W)).detach(),\n )\n * 0.0001\n )\n\n loss += (\n self.pc_loss_fn(\n self.des(d2, (H, W)),\n self.des(self.image_pyramid.reduce(d1), (H, W)).detach(),\n )\n * 0.0001\n )\n\n loss += (\n self.pc_loss_fn(\n self.des(d1, (H, W)),\n self.des(self.image_pyramid.reduce(d0), (H, W)).detach(),\n )\n * 0.0001\n )\n\n loss += self.sod_loss_fn(self.des(d3, (H, W)), self.des(y3, (H, W)))\n loss += self.sod_loss_fn(self.des(d2, (H, W)), self.des(y2, (H, W)))\n loss += self.sod_loss_fn(self.des(d1, (H, W)), self.des(y1, (H, W)))\n loss0 = self.sod_loss_fn(self.des(d0, (H, W)), self.des(y, (H, W)))\n loss += loss0\n\n pred = torch.sigmoid(d0)\n\n pred = (pred - pred.min()) / (pred.max() - pred.min() + 1e-8)\n sample = {\n \"pred\": pred,\n \"loss\": loss,\n \"loss0\": loss0,\n \"saliency\": [d3, d2, d1, d0],\n \"laplacian\": [p2, p1, p0],\n }\n return sample\n\n def forward_inference(self, x):\n B, _, H, W = x.shape\n\n if self.threshold is None:\n out = self.forward_inspyre(x)\n d3, d2, d1, d0 = out[\"saliency\"]\n p2, p1, p0 = out[\"laplacian\"]\n\n elif H <= self.threshold or W <= self.threshold:\n out = self.forward_inspyre(self.res(x, self.base_size))\n\n d3, d2, d1, d0 = out[\"saliency\"]\n p2, p1, p0 = out[\"laplacian\"]\n\n else:\n # LR Saliency Pyramid\n lr_out = self.forward_inspyre(self.res(x, self.base_size))\n lr_d3, lr_d2, lr_d1, lr_d0 = lr_out[\"saliency\"]\n lr_p2, lr_p1, lr_p0 = lr_out[\"laplacian\"]\n\n # HR Saliency Pyramid\n if H % 32 != 0 or W % 32 != 0:\n x = self.res(x, ((H // 32) * 32, (W // 32) * 32))\n hr_out = self.forward_inspyre(x)\n hr_d3, hr_d2, hr_d1, hr_d0 = hr_out[\"saliency\"]\n hr_p2, hr_p1, hr_p0 = hr_out[\"laplacian\"]\n\n # Pyramid Blending\n d3 = self.ret(lr_d0, hr_d3)\n\n t2 = self.ret(self.transition2(d3), hr_p2)\n p2 = t2 * hr_p2\n d2 = self.image_pyramid.reconstruct(d3, p2)\n\n t1 = self.ret(self.transition1(d2), hr_p1)\n p1 = t1 * hr_p1\n d1 = self.image_pyramid.reconstruct(d2, p1)\n\n t0 = self.ret(self.transition0(d1), hr_p0)\n p0 = t0 * hr_p0\n d0 = self.image_pyramid.reconstruct(d1, p0)\n\n if d0.shape[2] != H or d0.shape[3] != 2:\n d0 = self.res(d0, (H, W))\n pred = torch.sigmoid(d0)\n pred = (pred - pred.min()) / (pred.max() - pred.min() + 1e-8)\n sample = {\n \"pred\": pred,\n \"loss\": 0,\n \"saliency\": [d3, d2, d1, d0],\n \"laplacian\": [p2, p1, p0],\n }\n return sample\n\n @staticmethod\n def compute_loss(sample):\n return sample[\"loss0\"], sample[\"loss\"]"
},
{
"identifier": "InSPyReNet_Res2Net50",
"path": "src/segment/model/inspyrenet.py",
"snippet": "def InSPyReNet_Res2Net50(\n depth=64,\n pretrained=True,\n base_size: Optional[Union[int, Tuple[int, int]]] = None,\n **kwargs,\n):\n if base_size is None:\n base_size = (384, 384)\n if isinstance(base_size, int):\n base_size = (base_size, base_size)\n return InSPyReNet(\n res2net50_v1b(pretrained=pretrained),\n [64, 256, 512, 1024, 2048],\n depth,\n base_size,\n threshold=None,\n **kwargs,\n )"
},
{
"identifier": "InSPyReNet_SwinB",
"path": "src/segment/model/inspyrenet.py",
"snippet": "def InSPyReNet_SwinB(\n depth=64,\n pretrained=False,\n base_size: Optional[Union[int, Tuple[int, int]]] = None,\n **kwargs,\n):\n if base_size is None:\n base_size = (384, 384)\n if isinstance(base_size, int):\n base_size = (base_size, base_size)\n return InSPyReNet(\n SwinB(pretrained=pretrained),\n [128, 128, 256, 512, 1024],\n depth,\n base_size,\n **kwargs,\n )"
},
{
"identifier": "ISNetDIS",
"path": "src/segment/model/isnet.py",
"snippet": "class ISNetDIS(nn.Module):\n def __init__(self, in_ch=3, out_ch=1):\n super(ISNetDIS, self).__init__()\n\n self.conv_in = nn.Conv2d(in_ch, 64, 3, stride=2, padding=1)\n self.pool_in = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage1 = RSU7(64, 32, 64)\n self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage2 = RSU6(64, 32, 128)\n self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage3 = RSU5(128, 64, 256)\n self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage4 = RSU4(256, 128, 512)\n self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage5 = RSU4F(512, 256, 512)\n self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage6 = RSU4F(512, 256, 512)\n\n # decoder\n self.stage5d = RSU4F(1024, 256, 512)\n self.stage4d = RSU4(1024, 128, 256)\n self.stage3d = RSU5(512, 64, 128)\n self.stage2d = RSU6(256, 32, 64)\n self.stage1d = RSU7(128, 16, 64)\n\n self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)\n self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)\n self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)\n self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)\n self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)\n self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)\n\n # self.outconv = nn.Conv2d(6*out_ch,out_ch,1)\n\n @staticmethod\n def compute_loss_kl(preds, targets, dfs, fs, mode=\"MSE\"):\n return muti_loss_fusion_kl(preds, targets, dfs, fs, mode=mode)\n\n @staticmethod\n def compute_loss(args):\n if len(args) == 3:\n ds, dfs, labels = args\n return muti_loss_fusion(ds, labels)\n else:\n ds, dfs, labels, fs = args\n return muti_loss_fusion_kl(ds, labels, dfs, fs, mode=\"MSE\")\n\n def forward(self, x):\n hx = x\n\n hxin = self.conv_in(hx)\n hx = self.pool_in(hxin)\n\n # stage 1\n hx1 = self.stage1(hxin)\n hx = self.pool12(hx1)\n\n # stage 2\n hx2 = self.stage2(hx)\n hx = self.pool23(hx2)\n\n # stage 3\n hx3 = self.stage3(hx)\n hx = self.pool34(hx3)\n\n # stage 4\n hx4 = self.stage4(hx)\n hx = self.pool45(hx4)\n\n # stage 5\n hx5 = self.stage5(hx)\n hx = self.pool56(hx5)\n\n # stage 6\n hx6 = self.stage6(hx)\n hx6up = _upsample_like(hx6, hx5)\n\n # -------------------- decoder --------------------\n hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))\n hx5dup = _upsample_like(hx5d, hx4)\n\n hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))\n hx4dup = _upsample_like(hx4d, hx3)\n\n hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))\n hx3dup = _upsample_like(hx3d, hx2)\n\n hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))\n hx2dup = _upsample_like(hx2d, hx1)\n\n hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))\n\n # side output\n d1 = self.side1(hx1d)\n d1 = _upsample_like(d1, x)\n\n d2 = self.side2(hx2d)\n d2 = _upsample_like(d2, x)\n\n d3 = self.side3(hx3d)\n d3 = _upsample_like(d3, x)\n\n d4 = self.side4(hx4d)\n d4 = _upsample_like(d4, x)\n\n d5 = self.side5(hx5d)\n d5 = _upsample_like(d5, x)\n\n d6 = self.side6(hx6)\n d6 = _upsample_like(d6, x)\n\n # d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))\n\n # return [torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)], [hx1d, hx2d, hx3d, hx4d, hx5d, hx6]\n return [d1, d2, d3, d4, d5, d6], [hx1d, hx2d, hx3d, hx4d, hx5d, hx6]"
},
{
"identifier": "ISNetGTEncoder",
"path": "src/segment/model/isnet.py",
"snippet": "class ISNetGTEncoder(nn.Module):\n def __init__(self, in_ch=1, out_ch=1):\n super(ISNetGTEncoder, self).__init__()\n\n self.conv_in = myrebnconv(\n in_ch, 16, 3, stride=2, padding=1\n ) # nn.Conv2d(in_ch,64,3,stride=2,padding=1)\n\n self.stage1 = RSU7(16, 16, 64)\n self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage2 = RSU6(64, 16, 64)\n self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage3 = RSU5(64, 32, 128)\n self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage4 = RSU4(128, 32, 256)\n self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage5 = RSU4F(256, 64, 512)\n self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.stage6 = RSU4F(512, 64, 512)\n\n self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)\n self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)\n self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)\n self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)\n self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)\n self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)\n\n @staticmethod\n def compute_loss(args):\n preds, targets = args\n return muti_loss_fusion(preds, targets)\n\n def forward(self, x):\n hx = x\n\n hxin = self.conv_in(hx)\n # hx = self.pool_in(hxin)\n\n # stage 1\n hx1 = self.stage1(hxin)\n hx = self.pool12(hx1)\n\n # stage 2\n hx2 = self.stage2(hx)\n hx = self.pool23(hx2)\n\n # stage 3\n hx3 = self.stage3(hx)\n hx = self.pool34(hx3)\n\n # stage 4\n hx4 = self.stage4(hx)\n hx = self.pool45(hx4)\n\n # stage 5\n hx5 = self.stage5(hx)\n hx = self.pool56(hx5)\n\n # stage 6\n hx6 = self.stage6(hx)\n\n # side output\n d1 = self.side1(hx1)\n d1 = _upsample_like(d1, x)\n\n d2 = self.side2(hx2)\n d2 = _upsample_like(d2, x)\n\n d3 = self.side3(hx3)\n d3 = _upsample_like(d3, x)\n\n d4 = self.side4(hx4)\n d4 = _upsample_like(d4, x)\n\n d5 = self.side5(hx5)\n d5 = _upsample_like(d5, x)\n\n d6 = self.side6(hx6)\n d6 = _upsample_like(d6, x)\n\n # d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))\n\n # return [torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)], [hx1, hx2, hx3, hx4, hx5, hx6]\n return [d1, d2, d3, d4, d5, d6], [hx1, hx2, hx3, hx4, hx5, hx6]"
},
{
"identifier": "MODNet",
"path": "src/segment/model/modnet.py",
"snippet": "class MODNet(nn.Module):\n \"\"\"Architecture of MODNet\"\"\"\n\n def __init__(\n self,\n in_channels=3,\n hr_channels=32,\n backbone_arch=\"mobilenetv2\",\n backbone_pretrained=False,\n ):\n super(MODNet, self).__init__()\n\n self.in_channels = in_channels\n self.hr_channels = hr_channels\n self.backbone_arch = backbone_arch\n self.backbone_pretrained = backbone_pretrained\n\n self.backbone = SUPPORTED_BACKBONES[self.backbone_arch](self.in_channels)\n\n self.lr_branch = LRBranch(self.backbone)\n self.hr_branch = HRBranch(self.hr_channels, self.backbone.enc_channels)\n self.f_branch = FusionBranch(self.hr_channels, self.backbone.enc_channels)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n self._init_conv(m)\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):\n self._init_norm(m)\n\n if self.backbone_pretrained:\n self.backbone.load_pretrained_ckpt()\n\n def forward(self, img, inference):\n pred_semantic, lr8x, [enc2x, enc4x] = self.lr_branch(img, inference)\n pred_detail, hr2x = self.hr_branch(img, enc2x, enc4x, lr8x, inference)\n pred_matte = self.f_branch(img, lr8x, hr2x)\n\n return pred_semantic, pred_detail, pred_matte\n\n @staticmethod\n def compute_loss(args):\n pred_semantic, pred_detail, pred_matte, image, trimap, gt_matte = args\n semantic_loss, detail_loss, matte_loss = loss_func(\n pred_semantic, pred_detail, pred_matte, image, trimap, gt_matte\n )\n loss = semantic_loss + detail_loss + matte_loss\n return matte_loss, loss\n\n def freeze_norm(self):\n norm_types = [nn.BatchNorm2d, nn.InstanceNorm2d]\n for m in self.modules():\n for n in norm_types:\n if isinstance(m, n):\n m.eval()\n continue\n\n def _init_conv(self, conv):\n nn.init.kaiming_uniform_(conv.weight, a=0, mode=\"fan_in\", nonlinearity=\"relu\")\n if conv.bias is not None:\n nn.init.constant_(conv.bias, 0)\n\n def _init_norm(self, norm):\n if norm.weight is not None:\n nn.init.constant_(norm.weight, 1)\n nn.init.constant_(norm.bias, 0)\n\n def _apply(self, fn):\n super(MODNet, self)._apply(fn)\n blurer._apply(fn) # let blurer's device same as modnet\n return self"
},
{
"identifier": "U2NET",
"path": "src/segment/model/u2net.py",
"snippet": "class U2NET(nn.Module):\n def __init__(self, cfgs, out_ch):\n super(U2NET, self).__init__()\n self.out_ch = out_ch\n self._make_layers(cfgs)\n\n def forward(self, x):\n sizes = _size_map(x, self.height)\n maps = [] # storage for maps\n\n # side saliency map\n def unet(x, height=1):\n if height < 6:\n x1 = getattr(self, f\"stage{height}\")(x)\n x2 = unet(getattr(self, \"downsample\")(x1), height + 1)\n x = getattr(self, f\"stage{height}d\")(torch.cat((x2, x1), 1))\n side(x, height)\n return _upsample_like(x, sizes[height - 1]) if height > 1 else x\n else:\n x = getattr(self, f\"stage{height}\")(x)\n side(x, height)\n return _upsample_like(x, sizes[height - 1])\n\n def side(x, h):\n # side output saliency map (before sigmoid)\n x = getattr(self, f\"side{h}\")(x)\n x = _upsample_like(x, sizes[1])\n maps.append(x)\n\n def fuse():\n # fuse saliency probability maps\n maps.reverse()\n x = torch.cat(maps, 1)\n x = getattr(self, \"outconv\")(x)\n maps.insert(0, x)\n # return [torch.sigmoid(x) for x in maps]\n return [x for x in maps]\n\n unet(x)\n maps = fuse()\n return maps\n\n @staticmethod\n def compute_loss(args):\n preds, labels_v = args\n d0, d1, d2, d3, d4, d5, d6 = preds\n loss0 = bce_loss(d0, labels_v)\n loss1 = bce_loss(d1, labels_v)\n loss2 = bce_loss(d2, labels_v)\n loss3 = bce_loss(d3, labels_v)\n loss4 = bce_loss(d4, labels_v)\n loss5 = bce_loss(d5, labels_v)\n loss6 = bce_loss(d6, labels_v)\n\n loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6\n\n return loss0, loss\n\n def _make_layers(self, cfgs):\n self.height = int((len(cfgs) + 1) / 2)\n self.add_module(\"downsample\", nn.MaxPool2d(2, stride=2, ceil_mode=True))\n for k, v in cfgs.items():\n # build rsu block\n self.add_module(k, RSU(v[0], *v[1]))\n if v[2] > 0:\n # build side layer\n self.add_module(\n f\"side{v[0][-1]}\", nn.Conv2d(v[2], self.out_ch, 3, padding=1)\n )\n # build fuse layer\n self.add_module(\n \"outconv\", nn.Conv2d(int(self.height * self.out_ch), self.out_ch, 1)\n )"
},
{
"identifier": "U2NET_full2",
"path": "src/segment/model/u2net.py",
"snippet": "def U2NET_full2():\n full = {\n # cfgs for building RSUs and sides\n # {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}\n \"stage1\": [\"En_1\", (8, 3, 32, 64), -1],\n \"stage2\": [\"En_2\", (7, 64, 32, 128), -1],\n \"stage3\": [\"En_3\", (6, 128, 64, 256), -1],\n \"stage4\": [\"En_4\", (5, 256, 128, 512), -1],\n \"stage5\": [\"En_5\", (5, 512, 256, 512, True), -1],\n \"stage6\": [\"En_6\", (5, 512, 256, 512, True), 512],\n \"stage5d\": [\"De_5\", (5, 1024, 256, 512, True), 512],\n \"stage4d\": [\"De_4\", (5, 1024, 128, 256), 256],\n \"stage3d\": [\"De_3\", (6, 512, 64, 128), 128],\n \"stage2d\": [\"De_2\", (7, 256, 32, 64), 64],\n \"stage1d\": [\"De_1\", (8, 128, 16, 64), 64],\n }\n return U2NET(cfgs=full, out_ch=1)"
},
{
"identifier": "U2NET_lite2",
"path": "src/segment/model/u2net.py",
"snippet": "def U2NET_lite2():\n lite = {\n # cfgs for building RSUs and sides\n # {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}\n \"stage1\": [\"En_1\", (8, 3, 16, 64), -1],\n \"stage2\": [\"En_2\", (7, 64, 16, 64), -1],\n \"stage3\": [\"En_3\", (6, 64, 16, 64), -1],\n \"stage4\": [\"En_4\", (5, 64, 16, 64), -1],\n \"stage5\": [\"En_5\", (5, 64, 16, 64, True), -1],\n \"stage6\": [\"En_6\", (5, 64, 16, 64, True), 64],\n \"stage5d\": [\"De_5\", (5, 128, 16, 64, True), 64],\n \"stage4d\": [\"De_4\", (5, 128, 16, 64), 64],\n \"stage3d\": [\"De_3\", (6, 128, 16, 64), 64],\n \"stage2d\": [\"De_2\", (7, 128, 16, 64), 64],\n \"stage1d\": [\"De_1\", (8, 128, 16, 64), 64],\n }\n return U2NET(cfgs=lite, out_ch=1)"
}
] | import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torch.optim as optim
from pytorch_lightning import Trainer
from .model import ISNetDIS, ISNetGTEncoder, U2NET, U2NET_full2, U2NET_lite2, MODNet \
, InSPyReNet, InSPyReNet_Res2Net50, InSPyReNet_SwinB | 7,792 |
# warnings.filterwarnings("ignore")
net_names = ["isnet_is", "isnet", "isnet_gt", "u2net", "u2netl", "modnet", "inspyrnet_res", "inspyrnet_swin"]
def get_net(net_name, img_size):
if net_name == "isnet":
return ISNetDIS()
elif net_name == "isnet_is":
return ISNetDIS()
elif net_name == "isnet_gt":
return ISNetGTEncoder()
elif net_name == "u2net":
|
# warnings.filterwarnings("ignore")
net_names = ["isnet_is", "isnet", "isnet_gt", "u2net", "u2netl", "modnet", "inspyrnet_res", "inspyrnet_swin"]
def get_net(net_name, img_size):
if net_name == "isnet":
return ISNetDIS()
elif net_name == "isnet_is":
return ISNetDIS()
elif net_name == "isnet_gt":
return ISNetGTEncoder()
elif net_name == "u2net": | return U2NET_full2() | 7 | 2023-11-14 22:10:11+00:00 | 12k |
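Note on the record above: its next_line field shows that the expected completion of the "u2net" branch is return U2NET_full2(). Below is a minimal, hypothetical sketch of how the remaining get_net branches could continue, using only the factory functions defined in this record's context (U2NET_full2, U2NET_lite2, MODNet); the InSPyReNet branches are left unimplemented because their constructors are not shown here, so treat this as an illustration of the dispatch pattern rather than the repository's actual code.

# Sketch only: the import mirrors the record's own import statement; img_size is
# kept for signature compatibility but unused, as in the branches shown above.
from .model import U2NET_full2, U2NET_lite2, MODNet

def get_net_sketch(net_name, img_size):
    if net_name == "u2net":
        return U2NET_full2()                       # full U^2-Net config, out_ch=1
    elif net_name == "u2netl":
        return U2NET_lite2()                       # lite U^2-Net config
    elif net_name == "modnet":
        return MODNet(backbone_pretrained=False)   # trimap-free matting network
    else:
        # inspyrnet_res / inspyrnet_swin constructors are not shown in this record
        raise NotImplementedError(f"no constructor shown for: {net_name}")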
chuzhumin98/LLM_Eval | PRE/process.py | [
{
"identifier": "DataLoader",
"path": "PRE/data.py",
"snippet": "class DataLoader:\n '''\n The loader to load for evaluated task, with given prompt template to generate a series of prompts feeding for each LLM\n '''\n def __init__(self, args):\n self.path_data = args['path_data'] # the load path for the data\n self.format = args['format'] # the data format, csv (need a title line) or json (each line is a single data item)\n self.path_prompt = args['path_prompt'] if 'path_prompt' in args else None # the path of prompt template. In the prompt template, using {{key}} for the replacement of the key. For example, in the prompt \"You need answer a question: {{question}}\", the \"question\" field need to be included in the data\n if not os.path.exists(self.path_data):\n raise FileExistsError(\"Load task data failed: file not exist!\")\n assert self.format in ['csv', 'json']\n \n \n def generate_reader(self):\n if self.format == 'csv':\n with open(self.path_data, encoding='utf-8') as f:\n gen = csv.DictReader(f, skipinitialspace=True)\n elif self.format == 'json':\n gen = open(self.path_data, encoding='utf-8')\n else:\n raise Exception(\"Invalid data format\")\n return gen\n \n def get_prompt(self):\n if self.path_prompt is None:\n raise Exception(\"Exception: missing argument path_prompt\")\n if not os.path.exists(self.path_prompt):\n raise FileExistsError(\"Load task prompt template failed: file not exist!\")\n self.template_prompt = open(self.path_prompt, encoding='utf-8').read().strip()\n \n gen = self.generate_reader()\n \n for row in gen:\n if self.format == 'json':\n item = json.loads(row.strip())\n else:\n item = row\n \n prompt = self.template_prompt\n for key in item:\n prompt = prompt.replace(\"{{\" + key + \"}}\", item[key])\n yield prompt # a generator to return each prompt\n \n def get_task_items(self):\n data_list = []\n gen = self.generate_reader()\n for row in gen:\n if self.format == 'json':\n item = json.loads(row.strip())\n elif self.format == 'csv':\n item = dict(row)\n data_list.append(item)\n return data_list"
},
{
"identifier": "Auto_API",
"path": "PRE/api.py",
"snippet": "class Auto_API:\n @staticmethod\n def instantiate_api(api_type, args) -> LLM_API:\n for at, _API in API_type2class_list:\n if api_type == at:\n return _API(args)\n raise Exception(f\"Invalid api_type: {api_type}\")"
},
{
"identifier": "EXAM",
"path": "PRE/exam.py",
"snippet": "class EXAM:\n '''\n Conduct qualified exam, filtering qualified LLMs to become peer reviewers\n '''\n def __init__(self, args) -> None:\n self.source = args['source'] # same or others; same: the evaluated task and responses, others: independent prompts, no need for refer item\n self.mode = args['mode'] # pointwise, pairwise\n self.parser_type = args['parser_type'] # int, float, str\n '''\n If the source is same,\n In pointwise mode, the data consists key \"#index\" (the line index of the task) and key \"#source\" (the LLM to generate the response). The expected evaulate response is an integer or float number;\n In pairwise mode, the data consists key \"#index\" (the line index of the task), key \"#source1\" (the LLM 1 to generate the response) and key \"#source2\" (the LLM 2 to generate the response). The expected evaluate response is three possible token, meaning -1 (1 is better), 0 (tied), 1 (2 is better) respectively\n also, if we conduct reference exam, for each exam data item, it requires key \"#answer\" denotes the gold standard (integer for the pairwise mode)\n '''\n assert self.source in ['same', 'others']\n assert self.mode in ['pointwise', 'pairwise']\n assert self.parser_type in ['int', 'float', 'str']\n if self.parser_type == 'str':\n self.nominal_list = [nn.strip() for nn in args['nominal_list'].split(',')]\n self.nominal_ticks = [int(nn.strip()) for nn in args['nominal_list'].split(',')]\n else:\n self.nominal_list, self.nominal_ticks = None, None\n \n if self.source == 'same': # load generated task data and responses\n path_config_task_data = args['config_task_data']\n self.task_name = args['task_name']\n self.save_dir = args['save_dir'] # the exam result save dir, the exam evaluation save filename = [save_dir] / exam_responses / [task_name]_[model_name].json, each line is one result with json {response: str, result: float/int}\n if not os.path.exists(path_config_task_data):\n raise FileExistsError(\"Load task_data config failed: file not exist!\")\n\n config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config\n data_loader = DataLoader(config_task) # a task data loader\n self.task_data = data_loader.get_task_items()\n self.path_exam_same_data = args['path_exam_same_data']\n self.format_exam_same_data = args['format_exam_same_data']\n else: # load other exam data\n self.path_exam_others_data = args['path_exam_others_data']\n self.format_exam_others_data = args['format_exam_others_data']\n if not os.path.exists(self.path_exam_others_data):\n raise FileExistsError(\"Load exam others mode data failed: file not exist!\")\n self.reference_exam = args['conduct_reference_exam'] # True or False, whether to compare the responses v.s. gold standard\n self.inner_consistency_exam = args['conduct_inner_consistency_exam'] # True or False, whether to conduct inner-consistency exam\n if self.mode == 'pairwise':\n if self.reference_exam:\n self.p_gold = float(args['p_gold']) if 'p_gold' in args else 0.6 # accuarcy v.s. 
gold standard\n if self.inner_consistency_exam:\n self.p_cons = float(args['p_cons']) if 'p_cons' in args else 0.6 # consistency between two kinds of prompts\n elif self.mode == 'pointwise':\n self.metric_pointwise = args['metric_pointwise'] if 'metric_pointwise' in args else 'EM' # EM (exact match, proportion >= threshold) or MSE (mean square error, mse <= threshold)\n assert self.metric_pointwise in ['EM', \"MSE\"]\n if self.reference_exam:\n if self.metric_pointwise == 'EM':\n self.p_gold = float(args['p_gold']) if 'p_gold' in args else 0.6 # accuarcy v.s. gold standard\n elif self.metric_pointwise == 'MSE':\n self.MSE_acc = float(args['MSE_gold']) if 'MSE_gold' in args else 1. # MSE v.s. gold standard\n \n if self.inner_consistency_exam:\n if self.metric_pointwise == 'EM':\n self.p_cons = float(args['p_cons']) if 'p_cons' in args else 0.6 # consistency between two kinds of prompts\n elif self.metric_pointwise == 'MSE':\n self.MSE_cons = float(args['MSE_cons']) if 'MSE_cons' in args else 1. # MSE between two kinds of prompts\n\n path_prompt = args['path_exam_prompt']\n if not os.path.exists(path_prompt):\n raise FileExistsError(\"Load exam prompt template failed: file not exist!\")\n self.template_prompt = open(path_prompt, encoding='utf-8').read().strip()\n if self.inner_consistency_exam:\n path_prompt2 = args['path_exam_prompt2'] # used in inner consistency exam\n if not os.path.exists(path_prompt2):\n raise FileExistsError(\"Load exam prompt template 2 (used in inner-consistency exam) failed: file not exist!\")\n self.template_prompt2 = open(path_prompt2, encoding='utf-8').read().strip()\n \n if not self.inner_consistency_exam and not self.reference_exam:\n warnings.warn(\"Have not set any qualified exam!\", RuntimeWarning)\n \n \n def load_exam_prompts(self, prompt_template):\n if self.source == 'others':\n loader = DataLoader({\"path_data\": self.path_exam_others_data,\n \"format\": self.format_exam_others_data,})\n data_others = loader.get_task_items()\n prompts = []\n for item in data_others:\n prompt = prompt_template\n for key in item:\n prompt = prompt.replace(\"{{\" + key + \"}}\", item[key])\n prompts.append(prompt)\n if self.reference_exam:\n answers = [item['#answer'] for item in data_others]\n else:\n answers = None\n return prompts, answers\n elif self.source == 'same':\n loader = DataLoader({\"path_data\": self.path_exam_same_data,\n \"format\": self.format_exam_same_data,})\n samples_same = loader.get_task_items()\n evaluatees_list = set()\n if self.mode == 'pointwise':\n for sample in samples_same:\n evaluatees_list.add(sample['#source'])\n elif self.mode == 'pairwise':\n for sample in samples_same:\n evaluatees_list.add(sample['#source1'])\n evaluatees_list.add(sample['#source2'])\n responses_evaluatee_dict = dict()\n for ev in evaluatees_list:\n responses = [] # responses list for evaluatee ev\n path = f\"{self.save_dir}/task_responses/{self.task_name}_{ev}.json\"\n if not os.path.exists(path):\n raise FileExistsError(f\"Load {path} failed: file not exist!\")\n with open(path, 'r') as f:\n while True:\n line = f.readline().strip()\n if line:\n response = json.loads(line)['response']\n responses.append(response)\n else:\n break\n responses_evaluatee_dict[ev] = responses\n \n prompts = []\n for sample in samples_same:\n sidx = sample['#index']\n task = dict(self.task_data[sidx])\n if self.mode == 'pointwise':\n src = sample['#source']\n task['#source'] = responses_evaluatee_dict[src][sidx]\n elif self.mode == 'pairwise':\n src1 = sample['#source1']\n src2 = 
sample['#source2']\n task['#source1'] = responses_evaluatee_dict[src1][sidx]\n task['#source2'] = responses_evaluatee_dict[src2][sidx]\n prompt = prompt_template\n for key in task:\n prompt = prompt.replace(\"{{\" + key + \"}}\", task[key])\n prompts.append(prompt)\n \n if self.reference_exam:\n answers = [item['#answer'] for item in samples_same]\n else:\n answers = None\n return prompts, answers\n \n def calculate_metric(self, resultsA, resultsB) -> float: \n '''\n Calculate the evaluation metric between resultsA and resultsB\n pointwise or pairwise; EM/accuary or MSE (minus)\n '''\n assert len(resultsA) == len(resultsB)\n assert len(resultsA) > 0\n N = len(resultsA)\n p = 0.\n if self.mode == 'pairwise':\n for j in range(N):\n r, a = resultsA[j], resultsB[j]\n if r * a > 0:\n p += 1.\n elif r * a == 0:\n p += .5\n \n elif self.mode == 'pointwise':\n if self.metric_pointwise == 'EM':\n for j in range(N):\n r, a = resultsA[j], resultsB[j]\n if r == a:\n p += 1.\n elif self.metric_pointwise == 'MSE':\n for j in range(N):\n r, a = resultsA[j], resultsB[j]\n p -= (r - a) ** 2\n\n p /= float(N)\n return p\n \n \n def conduct_exam(self, config_api_evaluator):\n '''\n Conduct qualified exam, return a list of qualified apis with the same format of list config_api_evaluator, and their scores [score_list (refer acc, inner acc) for each qualified LLM], MSE will put the minus one\n '''\n apis = [Auto_API.instantiate_api(config_api['api_type'], config_api) for config_api in config_api_evaluator]\n if not self.inner_consistency_exam and not self.reference_exam:\n return config_api_evaluator, [[] for _ in config_api_evaluator]\n \n prompts, answers = self.load_exam_prompts(self.template_prompt)\n if self.inner_consistency_exam:\n prompts2, answers2 = self.load_exam_prompts(self.template_prompt2)\n \n os.makedirs(f\"{self.save_dir}/exam_responses\", exist_ok=True)\n qualified_apis, scores_qualified = [], [] # configs of these qualified apis, its corresponding api\n for i, api in enumerate(apis):\n path_out = f\"{self.save_dir}/exam_responses/{self.task_name}_{api.model_name}.json\"\n\n if os.path.exists(path_out):\n data = open(path_out).readlines()\n else:\n data = []\n if len(data) < len(prompts):\n fout = open(path_out, 'w')\n for line in data:\n fout.write(line)\n for prompt in prompts[len(data):]:\n response_orig = api.chat(prompt)\n result_parse = parse_response(response_orig, self.parser_type, self.nominal_list, self.nominal_ticks)\n line = json.dumps({\"response\": response_orig,\n 'result': result_parse})\n data.append(line)\n fout.write(line + '\\n')\n fout.close()\n results = [json.loads(line.strip())['result'] for line in data]\n \n eval_this = [config_api_evaluator[i]]\n \n if self.reference_exam:\n p_refer = self.calculate_metric(results, answers)\n p_thre = None\n if self.mode == 'pairwise':\n p_thre = self.p_gold\n elif self.mode == 'pointwise':\n if self.metric_pointwise == 'EM':\n p_thre = self.p_gold\n elif self.metric_pointwise == 'MSE':\n p_thre = -self.MSE_acc\n \n if p_refer < p_thre:\n print(f'model {api.model_name} failed to pass the reference exam')\n continue\n eval_this.append(p_refer)\n \n if self.inner_consistency_exam:\n path_out = f\"{self.save_dir}/exam_responses/{self.task_name}_{api.model_name}__prompt2.json\"\n\n if os.path.exists(path_out):\n data = open(path_out).readlines()\n else:\n data = []\n if len(data) < len(prompts2):\n fout = open(path_out, 'w')\n for line in data:\n fout.write(line)\n for prompt in prompts2[len(data):]:\n response_orig = api.chat(prompt)\n 
result_parse = parse_response(response_orig, self.parser_type, self.nominal_list, self.nominal_ticks)\n line = json.dumps({\"response\": response_orig,\n 'result': result_parse})\n data.append(line)\n fout.write(line + '\\n')\n fout.close()\n results2 = [json.loads(line.strip())['result'] for line in data]\n\n p_inner = self.calculate_metric(results, results2)\n p_thre = None\n if self.mode == 'pairwise':\n p_thre = self.p_cons\n elif self.mode == 'pointwise':\n if self.metric_pointwise == 'EM':\n p_thre = self.p_cons\n elif self.metric_pointwise == 'MSE':\n p_thre = -self.MSE_cons\n \n if p_inner < p_thre:\n print(f'model {api.model_name} failed to pass the inner-consistency exam')\n continue\n eval_this.append(p_inner)\n \n qualified_apis.append(config_api_evaluator[i])\n scores_qualified.append(eval_this)\n return qualified_apis, scores_qualified"
},
{
"identifier": "PRE",
"path": "PRE/eval.py",
"snippet": "class PRE:\n def __init__(self, args) -> None:\n path_config_eval = args['config_eval']\n if not os.path.exists(path_config_eval):\n raise FileExistsError(\"Load config eval failed: file not exist!\")\n args = copy.deepcopy(args)\n config_eval = yaml.load(open(path_config_eval, 'r'), Loader=yaml.FullLoader)\n args.update(config_eval)\n self.strategy = args['strategy'] # full, ELO, Glicko\n self.mode = args['mode'] # pointwise, pairwise\n if self.strategy in ['ELO', 'Glicko']:\n self.mode = 'pairwise' # sampling strategy, default with pairwise mode\n args['mode'] = 'pairwise'\n assert self.strategy in ['full', 'ELO', 'Glicko']\n assert self.mode in ['pointwise', 'pairwise']\n self.weighted_method = args['weighted_method'] # uniform, log (only accuary/consistency), exp, poly (only accuary/consistency)\n '''\n uniform: the equal weight\n log: log(p) - log(1-p)\n exp: exp(alpha * p)\n poly: p ^ alpha\n '''\n self.alpha = args['alpha'] if 'alpha' in args else 1.\n self.w_gold = args['w_gold'] if 'w_gold' in args else 0.5 # w_gold * s_gold + (1-w_gold) * s_consistency, only used when both of them are used in exam module\n self.evaluators_config = yaml.load_all(open(args['config_api_evaluator'], 'r'), Loader=yaml.FullLoader) # the config of evaluators\n self.evaluators_config = [cf for cf in self.evaluators_config]\n self.evaluator_model_names = [ev['model_name'] for ev in self.evaluators_config]\n self.save_dir = args['save_dir']\n self.task_name = args['task_name']\n # print(f\"evaluatee config: {args['config_api_evaluatee']}\")\n if 'config_api_evaluatee' in args:\n config_apis = yaml.load_all(open(args['config_api_evaluatee'], 'r'), Loader=yaml.FullLoader) # series of APIs\n self.evaluatee_LLM_names = [config_api['model_name'] for config_api in config_apis]\n else:\n self.evaluatee_LLM_names = args['evaluatee_names'].split(',')\n \n self.loader_data = EvalDataLoader(args)\n self.review = PEER_REVIEW(args)\n self.weights = self.weighted_function(args['scores_evaluators']) # the pre-compute weights of each evaluator based on their scores\n return\n \n def load_batch_data(self):\n prompts = self.loader_data.get_full_prompts()\n self.review.peer_review_batch(self.evaluators_config, prompts) # generate the peer review results of each evaluator\n ### load evaluation results\n results = dict()\n for ev_model_name in self.evaluator_model_names:\n path_ev = f\"{self.save_dir}/evaluation_responses/{self.task_name}_{ev_model_name}.json\"\n results_thisllm = []\n with open(path_ev, 'r') as f:\n while True:\n line = f.readline().strip()\n if line:\n results_thisllm.append(json.loads(line))\n else:\n break\n results[ev_model_name] = results_thisllm\n return results\n \n def evaluate(self):\n '''\n the unified api for evaluate, control the whole evaluation procedure\n '''\n if self.strategy == 'full':\n self.evaluate_full()\n else:\n self.evaluate_sample()\n\n def evaluate_full(self):\n '''\n evaluate with the full strategy\n '''\n results = self.load_batch_data()\n ### evaluate with majority voting\n os.makedirs(f\"{self.save_dir}/evaluation_results\", exist_ok=True)\n print(self.evaluatee_LLM_names)\n if self.mode == 'pointwise':\n results_perllm = dict() # evaluate dict of each evaluatee\n for ev in self.evaluator_model_names:\n results_ev = results[ev]\n for item in results_ev:\n model, task_id, label = item['model'], item['task_id'], item['result']\n if model not in results_perllm:\n results_perllm[model] = dict()\n if task_id not in results_perllm[model]:\n results_perllm[model][task_id] = 
[]\n results_perllm[model][task_id].append(label)\n outputs = dict()\n for model in results_perllm:\n outputs[model] = []\n for task_id in results_perllm[model]:\n outputs[model].append(self.aggregate_reviewers_results(results_perllm[model][task_id], self.weights))\n path_res = f\"{self.save_dir}/evaluation_results/{self.task_name}_result_detail.json\"\n json.dump(outputs, open(path_res, 'w'))\n with open(f\"{self.save_dir}/evaluation_results/{self.task_name}_result_overview.txt\", 'w') as f:\n for model in outputs:\n mean_val = np.mean(outputs[model])\n print(f'model {model}: {mean_val}')\n f.write(f'model {model}: {mean_val}\\n')\n elif self.mode == 'pairwise':\n results_perllm = dict() # evaluate dict of each evaluatee\n for i, ev in enumerate(self.evaluator_model_names):\n results_ev = results[ev]\n for item in results_ev:\n modelA, modelB, task_id, label = item['modelA'], item['modelB'], item['task_id'], item['result']\n if modelA <= modelB:\n key = f'{modelA}%{modelB}'\n else:\n key = f'{modelB}%{modelA}'\n label = -label # reversed the preference label if modelB v.s. modelA\n \n if key not in results_perllm:\n results_perllm[key] = dict()\n if task_id not in results_perllm[key]:\n results_perllm[key][task_id] = []\n if len(results_perllm[key][task_id]) < i + 1:\n results_perllm[key][task_id].append([])\n results_perllm[key][task_id][i].append(label)\n outputs = dict()\n for key in results_perllm:\n outputs[key] = []\n for task_id in results_perllm[key]:\n outputs[key].append(self.aggregate_reviewers_results(results_perllm[key][task_id], self.weights))\n path_res = f\"{self.save_dir}/evaluation_results/{self.task_name}_result_detail.json\"\n json.dump(outputs, open(path_res, 'w'))\n with open(f\"{self.save_dir}/evaluation_results/{self.task_name}_result_overview.csv\", 'w') as f:\n evaluatees_dict = {ev: i for i, ev in enumerate(self.evaluatee_LLM_names)}\n accs = np.zeros([len(self.evaluatee_LLM_names), len(self.evaluatee_LLM_names)], dtype=np.float)\n for key in outputs:\n mA, mB = key.split('%')\n idxA, idxB = evaluatees_dict[mA], evaluatees_dict[mB]\n res = np.array(outputs[key])\n mean_val = np.mean(res == 1) + np.mean(res == 0) * 0.5\n accs[idxA, idxB] = mean_val\n accs[idxB, idxA] = 1. - mean_val\n f.write(','.join(['']+self.evaluatee_LLM_names) + '\\n')\n for i in range(len(self.evaluatee_LLM_names)):\n f.write(','.join([self.evaluatee_LLM_names[i]] + [str(num) for num in accs[i]]) + '\\n')\n lines = open(f\"{self.save_dir}/evaluation_results/{self.task_name}_result_overview.csv\", 'r').readlines()\n print(''.join(lines))\n \n def evaluate_sample(self):\n '''\n evaluate with sampling strategies (e.g. ELO, Glicko)\n '''\n results = self.load_batch_data()\n ### only for pairwise mode\n os.makedirs(f\"{self.save_dir}/evaluation_results\", exist_ok=True)\n results_perllm = dict() # evaluate dict of each evaluatee\n for i, ev in enumerate(self.evaluator_model_names):\n results_ev = results[ev]\n for item in results_ev:\n print(item)\n modelA, modelB, task_id, label = item['modelA'], item['modelB'], item['task_id'], item['result']\n if modelA <= modelB:\n key = f'{modelA}%{modelB}'\n else:\n key = f'{modelB}%{modelA}'\n label = -label # reversed the preference label if modelB v.s. 
modelA\n \n if key not in results_perllm:\n results_perllm[key] = dict()\n if task_id not in results_perllm[key]:\n results_perllm[key][task_id] = []\n if len(results_perllm[key][task_id]) < i + 1:\n results_perllm[key][task_id].append([])\n results_perllm[key][task_id][i].append(label)\n games_list = []\n evaluatees_dict = {ev: i for i, ev in enumerate(self.evaluatee_LLM_names)}\n\n for key in results_perllm:\n mA, mB = key.split('%')\n idxA, idxB = evaluatees_dict[mA], evaluatees_dict[mB]\n for task_id in results_perllm[key]:\n games_list.append([idxA, idxB, self.aggregate_reviewers_results(results_perllm[key][task_id], self.weights)])\n indexes = np.array(range(len(games_list)))\n np.random.shuffle(indexes) # randomize the game order\n path_res = f\"{self.save_dir}/evaluation_results/{self.task_name}_result_detail.txt\"\n fout = open(path_res, 'w')\n if self.strategy == 'ELO': # we set K = 16\n def elo_expect_win_rate(x): # x is the ELO difference\n return 1. / (1. + 10. ** (x / 400.))\n rates = [1000., 1000.]\n K = 16.\n for r, idx in enumerate(indexes):\n roleA, roleB, label = games_list[idx]\n eA = elo_expect_win_rate(rates[roleB] - rates[roleA])\n eB = 1. - eA\n sB = (1. + label) / 2. # -1 -> 0, 0 -> 0.5, 1 -> 1\n sA = 1. - sB\n rates[roleA] += K * (sA - eA)\n rates[roleB] += K * (sB - eB)\n fout.write(f\"After round {r}, ELO rate: {rates}\\n\")\n elif self.strategy == 'Glicko':\n # TODO\n pass\n fout.close()\n \n with open(f\"{self.save_dir}/evaluation_results/{self.task_name}_result_overview.csv\", 'w') as f:\n f.write(f\"Final {self.strategy} rate leaderboard:\\n\")\n ranks = np.argsort(-np.array(rates))\n for r in ranks:\n f.write(f\"{self.evaluatee_LLM_names[r]}: {rates[r]}\\n\")\n lines = open(f\"{self.save_dir}/evaluation_results/{self.task_name}_result_overview.csv\", 'r').readlines()\n print(''.join(lines))\n \n def weighted_function(self, scores):\n '''\n return the weight (normalized) of each LLM, with the given weighted method and parameter (alpha and w_gold)\n '''\n assert len(scores) > 0\n N = len(scores)\n if len(scores[0]) == 0 or self.weighted_method == 'uniform': # when no exam or uniform strategy, equal weight\n p = 1. / float(N)\n return np.array([p for _ in range(N)])\n elif self.weighted_method == 'log':\n ws = np.log([s[0] for s in scores]) - np.log([1. - s[0] for s in scores])\n if len(scores[0]) > 1:\n ws2 = np.log([s[1] for s in scores]) - np.log([1. 
- s[1] for s in scores])\n ws = self.w_gold * ws + (1-self.w_gold) * ws2\n ws /= np.sum(ws)\n return ws\n elif self.weighted_method == 'exp':\n ws = np.exp(self.alpha * np.array([s[0] for s in scores]))\n if len(scores[0]) > 1:\n ws2 = np.exp(self.alpha * np.array([s[1] for s in scores]))\n ws = self.w_gold * ws + (1-self.w_gold) * ws2\n ws /= np.sum(ws)\n return ws\n elif self.weighted_method == 'poly':\n ws = np.array([s[0] for s in scores]) ** self.alpha\n if len(scores[0]) > 1:\n ws2 = np.array([s[1] for s in scores]) ** self.alpha\n ws = self.w_gold * ws + (1-self.w_gold) * ws2\n ws /= np.sum(ws)\n return ws\n else:\n raise Exception(\"Unexpected parameter weighted_method!\")\n\n \n def aggregate_reviewers_results(self, results, weights):\n '''\n aggregate results with the given weights\n if mode == 'pointwise', results and weights are all (N) array, N is the size of evaluators; weighted sum\n if mode == 'pairwise', results are (N, 2) array, and weights are (N) array; majority voting, pairwise is already aligned, i.e., if B ~ A is better, then convert into A ~ B is worse\n '''\n assert len(results) == len(weights)\n if self.mode == 'pointwise':\n return sum([results[i] * weights[i] for i in range(len(weights))])\n elif self.mode == 'pairwise':\n cnt_pos, cnt_neg = 0., 0.\n for items in results:\n for item in items:\n if item > 0:\n cnt_pos += 1.\n elif item < 0:\n cnt_neg += 1.\n if cnt_pos > cnt_neg:\n return 1\n elif cnt_pos < cnt_neg:\n return -1\n else:\n return 0"
}
] | import os
import yaml
import json, csv
import copy
import sys
from PRE.data import DataLoader
from PRE.api import Auto_API
from PRE.exam import EXAM
from PRE.eval import PRE | 7,721 | '''
The procedure of the whole peer review framework
'''
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class Process:
'''
The control of the whole peer review process
'''
@staticmethod
def run(args): # the API used for automatic evaluation
Process.collect_task_response(args)
qualified_apis, scores_qualified = Process.conduct_qualified_exam(args)
args['config_evaluators'] = qualified_apis
args['scores_evaluators'] = scores_qualified
# print(scores_qualified)
Process.peer_review_and_evaluate(args)
return None
@staticmethod
def collect_task_response(args):
path_config_api_evaluatee = args['config_api_evaluatee']
path_config_task_data = args['config_task_data']
task_name = args['task_name']
save_dir = args['save_dir'] # the task result save dir, the task save filename = [save_dir] / task_responses / [task_name]_[model_name].json, each line is one result with json {response: str}
os.makedirs(os.path.join(save_dir, "task_responses"), exist_ok=True)
if not os.path.exists(path_config_api_evaluatee):
raise FileExistsError("Load api_evaluatee config failed: file not exist!")
if not os.path.exists(path_config_task_data):
raise FileExistsError("Load task_data config failed: file not exist!")
config_apis = yaml.load_all(open(path_config_api_evaluatee, 'r'), Loader=yaml.FullLoader) # series of APIs
config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config
process_num = args['process_num'] # multi-process or not
data_loader = DataLoader(config_task) # a task data loader
| '''
The procedure of the whole peer review framework
'''
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class Process:
'''
The control of the whole peer review process
'''
@staticmethod
def run(args): # the API used for automatic evaluation
Process.collect_task_response(args)
qualified_apis, scores_qualified = Process.conduct_qualified_exam(args)
args['config_evaluators'] = qualified_apis
args['scores_evaluators'] = scores_qualified
# print(scores_qualified)
Process.peer_review_and_evaluate(args)
return None
@staticmethod
def collect_task_response(args):
path_config_api_evaluatee = args['config_api_evaluatee']
path_config_task_data = args['config_task_data']
task_name = args['task_name']
save_dir = args['save_dir'] # the task result save dir, the task save filename = [save_dir] / task_responses / [task_name]_[model_name].json, each line is one result with json {response: str}
os.makedirs(os.path.join(save_dir, "task_responses"), exist_ok=True)
if not os.path.exists(path_config_api_evaluatee):
raise FileExistsError("Load api_evaluatee config failed: file not exist!")
if not os.path.exists(path_config_task_data):
raise FileExistsError("Load task_data config failed: file not exist!")
config_apis = yaml.load_all(open(path_config_api_evaluatee, 'r'), Loader=yaml.FullLoader) # series of APIs
config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config
process_num = args['process_num'] # multi-process or not
data_loader = DataLoader(config_task) # a task data loader | apis = [Auto_API.instantiate_api(config_api['api_type'], config_api) for config_api in config_apis] # store for all valid apis | 1 | 2023-11-16 18:40:23+00:00 | 12k |
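Note on the record above: its next_line field is the list comprehension that instantiates one API client per evaluatee config (apis = [Auto_API.instantiate_api(...) for config_api in config_apis]). Below is a hedged sketch of how collect_task_response might continue from that point, assembled from the patterns visible in this record's context snippets (DataLoader.get_prompt, the api.chat / api.model_name usage in EXAM, and the save-path convention documented in the save_dir comment); the loop itself is an illustrative assumption, not the repository's code.

# Sketch only: continues the body of collect_task_response, so config_apis,
# data_loader, save_dir and task_name come from the lines above; os and json
# are already imported in this record's import statement.
apis = [Auto_API.instantiate_api(cfg['api_type'], cfg) for cfg in config_apis]
prompts = list(data_loader.get_prompt())  # render the task prompt template per data item
for api in apis:
    path_out = os.path.join(save_dir, "task_responses", f"{task_name}_{api.model_name}.json")
    with open(path_out, 'w', encoding='utf-8') as fout:
        for prompt in prompts:
            response = api.chat(prompt)  # query the evaluatee LLM
            fout.write(json.dumps({"response": response}) + "\n")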
believethehype/nostrdvm | nostr_dvm/dvm.py | [
{
"identifier": "EventDefinitions",
"path": "nostr_dvm/utils/definitions.py",
"snippet": "class EventDefinitions:\n KIND_DM = 4\n KIND_ZAP = 9735\n KIND_ANNOUNCEMENT = 31990\n KIND_NIP94_METADATA = 1063\n KIND_FEEDBACK = 7000\n KIND_NIP90_EXTRACT_TEXT = 5000\n KIND_NIP90_RESULT_EXTRACT_TEXT = KIND_NIP90_EXTRACT_TEXT + 1000\n KIND_NIP90_SUMMARIZE_TEXT = 5001\n KIND_NIP90_RESULT_SUMMARIZE_TEXT = KIND_NIP90_SUMMARIZE_TEXT + 1000\n KIND_NIP90_TRANSLATE_TEXT = 5002\n KIND_NIP90_RESULT_TRANSLATE_TEXT = KIND_NIP90_TRANSLATE_TEXT + 1000\n KIND_NIP90_GENERATE_TEXT = 5050\n KIND_NIP90_RESULT_GENERATE_TEXT = KIND_NIP90_GENERATE_TEXT + 1000\n KIND_NIP90_GENERATE_IMAGE = 5100\n KIND_NIP90_RESULT_GENERATE_IMAGE = KIND_NIP90_GENERATE_IMAGE + 1000\n KIND_NIP90_CONVERT_VIDEO = 5200\n KIND_NIP90_RESULT_CONVERT_VIDEO = KIND_NIP90_CONVERT_VIDEO + 1000\n KIND_NIP90_GENERATE_VIDEO = 5202\n KIND_NIP90_TEXT_TO_SPEECH = 5250\n KIND_NIP90_RESULT_TEXT_TO_SPEECH = KIND_NIP90_TEXT_TO_SPEECH + 1000\n KIND_NIP90_RESULT_GENERATE_VIDEO = KIND_NIP90_GENERATE_VIDEO + 1000\n KIND_NIP90_CONTENT_DISCOVERY = 5300\n KIND_NIP90_RESULT_CONTENT_DISCOVERY = KIND_NIP90_CONTENT_DISCOVERY + 1000\n KIND_NIP90_PEOPLE_DISCOVERY = 5301\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY = KIND_NIP90_PEOPLE_DISCOVERY + 1000\n KIND_NIP90_CONTENT_SEARCH = 5302\n KIND_NIP90_RESULTS_CONTENT_SEARCH = KIND_NIP90_CONTENT_SEARCH + 1000\n KIND_NIP90_GENERIC = 5999\n KIND_NIP90_RESULT_GENERIC = KIND_NIP90_GENERIC + 1000\n ANY_RESULT = [KIND_NIP90_RESULT_EXTRACT_TEXT,\n KIND_NIP90_RESULT_SUMMARIZE_TEXT,\n KIND_NIP90_RESULT_TRANSLATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_IMAGE,\n KIND_NIP90_CONTENT_DISCOVERY,\n KIND_NIP90_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_CONVERT_VIDEO,\n KIND_NIP90_RESULT_CONTENT_DISCOVERY,\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_GENERATE_VIDEO,\n KIND_NIP90_RESULT_GENERIC]"
},
{
"identifier": "RequiredJobToWatch",
"path": "nostr_dvm/utils/definitions.py",
"snippet": "class RequiredJobToWatch:\n event: Event\n timestamp: int"
},
{
"identifier": "JobToWatch",
"path": "nostr_dvm/utils/definitions.py",
"snippet": "class JobToWatch:\n event: str\n timestamp: int\n is_paid: bool\n amount: int\n status: str\n result: str\n is_processed: bool\n bolt11: str\n payment_hash: str\n expires: int"
},
{
"identifier": "DVMConfig",
"path": "nostr_dvm/utils/dvmconfig.py",
"snippet": "class DVMConfig:\n SUPPORTED_DVMS = []\n PRIVATE_KEY: str = \"\"\n PUBLIC_KEY: str = \"\"\n FIX_COST: float = None\n PER_UNIT_COST: float = None\n\n RELAY_LIST = [\"wss://relay.damus.io\", \"wss://nostr-pub.wellorder.net\", \"wss://nos.lol\", \"wss://nostr.wine\",\n \"wss://nostr.mom\", \"wss://nostr.oxtr.dev\", \"wss://relay.nostr.bg\",\n \"wss://relay.f7z.io\", \"wss://pablof7z.nostr1.com\", \"wss://relay.nostr.net\", \"wss://140.f7z.io\",\n \"wss://relay.snort.social\", \"wss://offchain.pub/\", \"wss://relay.nostr.band\"]\n\n RELAY_TIMEOUT = 5\n EXTERNAL_POST_PROCESS_TYPE = PostProcessFunctionType.NONE # Leave this on None, except the DVM is external\n LNBITS_INVOICE_KEY = '' # Will all automatically generated by default, or read from .env\n LNBITS_ADMIN_KEY = '' # In order to pay invoices, e.g. from the bot to DVMs, or reimburse users.\n LNBITS_URL = 'https://lnbits.com'\n LN_ADDRESS = ''\n SCRIPT = ''\n IDENTIFIER = ''\n USE_OWN_VENV = True # Make an own venv for each dvm's process function.Disable if you want to install packages into main venv. Only recommended if you dont want to run dvms with different dependency versions\n DB: str\n NEW_USER_BALANCE: int = 0 # Free credits for new users\n NIP89: NIP89Config\n SHOW_RESULT_BEFORE_PAYMENT: bool = False # if this is true show results even when not paid right after autoprocess"
},
{
"identifier": "admin_make_database_updates",
"path": "nostr_dvm/utils/admin_utils.py",
"snippet": "def admin_make_database_updates(adminconfig: AdminConfig = None, dvmconfig: DVMConfig = None, client: Client = None):\n # This is called on start of Server, Admin function to manually whitelist/blacklist/add balance/delete users\n if adminconfig is None or dvmconfig is None:\n return\n\n if not isinstance(adminconfig, AdminConfig):\n return\n\n if ((\n adminconfig.WHITELISTUSER is True or adminconfig.UNWHITELISTUSER is True or adminconfig.BLACKLISTUSER is True or adminconfig.DELETEUSER is True)\n and adminconfig.USERNPUB == \"\"):\n return\n\n if adminconfig.UPDATE_PROFILE and (dvmconfig.NIP89 is None):\n return\n\n if adminconfig.DELETE_NIP89 and (adminconfig.EVENTID == \"\" or adminconfig.EVENTID == \"\"):\n return\n\n db = dvmconfig.DB\n\n if str(adminconfig.USERNPUB).startswith(\"npub\"):\n publickey = PublicKey.from_bech32(adminconfig.USERNPUB).to_hex()\n else:\n publickey = adminconfig.USERNPUB\n\n if adminconfig.WHITELISTUSER:\n user = get_or_add_user(db, publickey, client=client, config=dvmconfig)\n update_sql_table(db, user.npub, user.balance, True, False, user.nip05, user.lud16, user.name, user.lastactive)\n user = get_from_sql_table(db, publickey)\n print(str(user.name) + \" is whitelisted: \" + str(user.iswhitelisted))\n\n if adminconfig.UNWHITELISTUSER:\n user = get_from_sql_table(db, publickey)\n update_sql_table(db, user.npub, user.balance, False, False, user.nip05, user.lud16, user.name, user.lastactive)\n\n if adminconfig.BLACKLISTUSER:\n user = get_from_sql_table(db, publickey)\n update_sql_table(db, user.npub, user.balance, False, True, user.nip05, user.lud16, user.name, user.lastactive)\n\n if adminconfig.DELETEUSER:\n delete_from_sql_table(db, publickey)\n\n if adminconfig.ClEANDB:\n clean_db(db)\n\n if adminconfig.LISTDATABASE:\n list_db(db)\n\n if adminconfig.REBROADCAST_NIP89:\n nip89_announce_tasks(dvmconfig, client=client)\n\n if adminconfig.DELETE_NIP89:\n event_id = adminconfig.EVENTID\n keys = Keys.from_sk_str(\n adminconfig.PRIVKEY) # Private key from sender of Event (e.g. the key of an nip89 announcement you want to delete)\n fetch_nip89_paramters_for_deletion(keys, event_id, client, dvmconfig)\n\n if adminconfig.UPDATE_PROFILE:\n update_profile(dvmconfig, client, lud16=adminconfig.LUD16)"
},
{
"identifier": "AdminConfig",
"path": "nostr_dvm/utils/admin_utils.py",
"snippet": "class AdminConfig:\n REBROADCAST_NIP89: bool = False\n UPDATE_PROFILE: bool = False\n DELETE_NIP89: bool = False\n WHITELISTUSER: bool = False\n UNWHITELISTUSER: bool = False\n BLACKLISTUSER: bool = False\n DELETEUSER: bool = False\n LISTDATABASE: bool = False\n ClEANDB: bool = False\n\n USERNPUB: str = \"\"\n LUD16: str = \"\"\n\n EVENTID: str = \"\"\n PRIVKEY: str = \"\""
},
{
"identifier": "get_amount_per_task",
"path": "nostr_dvm/utils/backend_utils.py",
"snippet": "def get_amount_per_task(task, dvm_config, duration=1):\n # duration is either static 1 (for images etc) or in seconds by default (e.g. audio/video)\n for dvm in dvm_config.SUPPORTED_DVMS: # this is currently just one\n if dvm.TASK == task:\n amount = dvm.FIX_COST + (dvm.PER_UNIT_COST * duration)\n return amount\n else:\n print(\"[\" + dvm_config.SUPPORTED_DVMS[\n 0].NAME + \"] Task \" + task + \" is currently not supported by this instance, skipping\")\n return None"
},
{
"identifier": "check_task_is_supported",
"path": "nostr_dvm/utils/backend_utils.py",
"snippet": "def check_task_is_supported(event: Event, client, config=None):\n try:\n dvm_config = config\n # Check for generic issues, event maformed, referenced event not found etc..\n if not is_input_supported_generic(event.tags(), client, dvm_config):\n return False, \"\"\n\n # See if current dvm supports the task\n task = get_task(event, client=client, dvm_config=dvm_config)\n if task not in (x.TASK for x in dvm_config.SUPPORTED_DVMS):\n return False, task\n # See if current dvm can handle input for given task\n for dvm in dvm_config.SUPPORTED_DVMS:\n if dvm.TASK == task:\n if not dvm.is_input_supported(event.tags(), client, config):\n return False, task\n return True, task\n\n\n except Exception as e:\n print(\"Check task: \" + str(e))"
},
{
"identifier": "get_task",
"path": "nostr_dvm/utils/backend_utils.py",
"snippet": "def get_task(event, client, dvm_config):\n try:\n if event.kind() == EventDefinitions.KIND_NIP90_GENERIC: # use this for events that have no id yet, inclufr j tag\n for tag in event.tags():\n if tag.as_vec()[0] == 'j':\n return tag.as_vec()[1]\n else:\n return \"unknown job: \" + event.as_json()\n elif event.kind() == EventDefinitions.KIND_DM: # dm\n for tag in event.tags():\n if tag.as_vec()[0] == 'j':\n return tag.as_vec()[1]\n else:\n return \"unknown job: \" + event.as_json()\n\n # This looks a bit more complicated, but we do several tasks for text-extraction in the future\n elif event.kind() == EventDefinitions.KIND_NIP90_EXTRACT_TEXT:\n for tag in event.tags():\n if tag.as_vec()[0] == \"i\":\n if tag.as_vec()[2] == \"url\":\n file_type = check_url_is_readable(tag.as_vec()[1])\n print(file_type)\n if file_type == \"pdf\":\n return \"pdf-to-text\"\n elif file_type == \"audio\" or file_type == \"video\":\n return \"speech-to-text\"\n elif file_type == \"image\":\n return \"image-to-text\"\n else:\n return \"unknown job\"\n elif tag.as_vec()[2] == \"event\":\n evt = get_event_by_id(tag.as_vec()[1], client=client, config=dvm_config)\n if evt is not None:\n if evt.kind() == 1063:\n for tg in evt.tags():\n if tg.as_vec()[0] == 'url':\n file_type = check_url_is_readable(tg.as_vec()[1])\n if file_type == \"pdf\":\n return \"pdf-to-text\"\n elif file_type == \"audio\" or file_type == \"video\":\n return \"speech-to-text\"\n else:\n return \"unknown job\"\n else:\n return \"unknown type\"\n else:\n return \"unknown job\"\n elif event.kind() == EventDefinitions.KIND_NIP90_GENERATE_IMAGE:\n has_image_tag = False\n has_text_tag = False\n for tag in event.tags():\n if tag.as_vec()[0] == \"i\":\n if tag.as_vec()[2] == \"url\":\n file_type = check_url_is_readable(tag.as_vec()[1])\n if file_type == \"image\":\n has_image_tag = True\n print(\"found image tag\")\n elif tag.as_vec()[2] == \"job\":\n evt = get_referenced_event_by_id(event_id=tag.as_vec()[1], kinds=\n [EventDefinitions.KIND_NIP90_RESULT_EXTRACT_TEXT,\n EventDefinitions.KIND_NIP90_RESULT_TRANSLATE_TEXT,\n EventDefinitions.KIND_NIP90_RESULT_SUMMARIZE_TEXT],\n client=client,\n dvm_config=dvm_config)\n if evt is not None:\n file_type = check_url_is_readable(evt.content())\n if file_type == \"image\":\n has_image_tag = True\n elif tag.as_vec()[2] == \"text\":\n has_text_tag = True\n\n if has_image_tag:\n return \"image-to-image\"\n elif has_text_tag and not has_image_tag:\n return \"text-to-image\"\n # TODO if a task can consist of multiple inputs add them here\n # This is not ideal. Maybe such events should have their own kind\n\n # else if kind is supported, simply return task\n else:\n\n for dvm in dvm_config.SUPPORTED_DVMS:\n if dvm.KIND == event.kind():\n return dvm.TASK\n except Exception as e:\n print(\"Get task: \" + str(e))\n\n return \"unknown type\""
},
{
"identifier": "create_sql_table",
"path": "nostr_dvm/utils/database_utils.py",
"snippet": "def create_sql_table(db):\n try:\n import os\n if not os.path.exists(r'db'):\n os.makedirs(r'db')\n if not os.path.exists(r'outputs'):\n os.makedirs(r'outputs')\n con = sqlite3.connect(db)\n cur = con.cursor()\n cur.execute(\"\"\" CREATE TABLE IF NOT EXISTS users (\n npub text PRIMARY KEY,\n sats integer NOT NULL,\n iswhitelisted boolean,\n isblacklisted boolean,\n nip05 text,\n lud16 text,\n name text,\n lastactive integer\n ); \"\"\")\n cur.execute(\"SELECT name FROM sqlite_master\")\n con.close()\n\n except Error as e:\n print(e)"
},
{
"identifier": "get_or_add_user",
"path": "nostr_dvm/utils/database_utils.py",
"snippet": "def get_or_add_user(db, npub, client, config, update=False):\n user = get_from_sql_table(db, npub)\n if user is None:\n try:\n name, nip05, lud16 = fetch_user_metadata(npub, client)\n print(\"Adding User: \" + npub + \" (\" + npub + \")\")\n add_to_sql_table(db, npub, config.NEW_USER_BALANCE, False, False, nip05,\n lud16, name, Timestamp.now().as_secs())\n user = get_from_sql_table(db, npub)\n return user\n except Exception as e:\n print(\"Error Adding User to DB: \" + str(e))\n elif update:\n try:\n name, nip05, lud16 = fetch_user_metadata(npub, client)\n print(\"Updating User: \" + npub + \" (\" + npub + \")\")\n update_sql_table(db, user.npub, user.balance, user.iswhitelisted, user.isblacklisted, nip05,\n lud16, name, Timestamp.now().as_secs())\n user = get_from_sql_table(db, npub)\n return user\n except Exception as e:\n print(\"Error Updating User in DB: \" + str(e))\n\n return user"
},
{
"identifier": "update_user_balance",
"path": "nostr_dvm/utils/database_utils.py",
"snippet": "def update_user_balance(db, npub, additional_sats, client, config):\n user = get_from_sql_table(db, npub)\n if user is None:\n name, nip05, lud16 = fetch_user_metadata(npub, client)\n add_to_sql_table(db, npub, (int(additional_sats) + config.NEW_USER_BALANCE), False, False,\n nip05, lud16, name, Timestamp.now().as_secs())\n print(\"Adding User: \" + npub + \" (\" + npub + \")\")\n else:\n user = get_from_sql_table(db, npub)\n new_balance = int(user.balance) + int(additional_sats)\n update_sql_table(db, npub, new_balance, user.iswhitelisted, user.isblacklisted, user.nip05, user.lud16,\n user.name,\n Timestamp.now().as_secs())\n print(\"Updated user balance for: \" + str(user.name) +\n \" Zap amount: \" + str(additional_sats) + \" Sats. New balance: \" + str(new_balance) +\" Sats\")\n\n if config is not None:\n keys = Keys.from_sk_str(config.PRIVATE_KEY)\n #time.sleep(1.0)\n\n message = (\"Added \" + str(additional_sats) + \" Sats to balance. New balance is \" + str(new_balance) + \" Sats.\")\n\n evt = EventBuilder.new_encrypted_direct_msg(keys, PublicKey.from_hex(npub), message,\n None).to_event(keys)\n send_event(evt, client=client, dvm_config=config)"
},
{
"identifier": "update_sql_table",
"path": "nostr_dvm/utils/database_utils.py",
"snippet": "def update_sql_table(db, npub, balance, iswhitelisted, isblacklisted, nip05, lud16, name, lastactive):\n try:\n con = sqlite3.connect(db)\n cur = con.cursor()\n data = (balance, iswhitelisted, isblacklisted, nip05, lud16, name, lastactive, npub)\n\n cur.execute(\"\"\" UPDATE users\n SET sats = ? ,\n iswhitelisted = ? ,\n isblacklisted = ? ,\n nip05 = ? ,\n lud16 = ? ,\n name = ? ,\n lastactive = ?\n WHERE npub = ?\"\"\", data)\n con.commit()\n con.close()\n except Error as e:\n print(\"Error Updating DB: \" + str(e))"
},
{
"identifier": "input_data_file_duration",
"path": "nostr_dvm/utils/mediasource_utils.py",
"snippet": "def input_data_file_duration(event, dvm_config, client, start=0, end=0):\n # print(\"[\" + dvm_config.NIP89.NAME + \"] Getting Duration of the Media file..\")\n input_value = \"\"\n input_type = \"\"\n for tag in event.tags():\n if tag.as_vec()[0] == 'i':\n input_value = tag.as_vec()[1]\n input_type = tag.as_vec()[2]\n\n if input_type == \"text\":\n return len(input_value)\n\n if input_type == \"event\": # NIP94 event\n evt = get_event_by_id(input_value, client=client, config=dvm_config)\n if evt is not None:\n input_value, input_type = check_nip94_event_for_media(evt, input_value, input_type)\n if input_type == \"text\":\n # For now, ingore length of any text, just return 1.\n return len(input_value)\n\n if input_type == \"url\":\n source_type = check_source_type(input_value)\n\n filename, start, end, type = get_file_start_end_type(input_value, source_type, start, end, True)\n if type != \"audio\" and type != \"video\":\n return 1\n if filename == \"\" or filename is None:\n return 0\n try:\n file_reader = AudioReader(filename, ctx=cpu(0), mono=False)\n duration = float(file_reader.duration())\n except Exception as e:\n print(e)\n return 0\n print(\"Original Duration of the Media file: \" + str(duration))\n start_time, end_time, new_duration = (\n convert_media_length(start, end, duration))\n print(\"New Duration of the Media file: \" + str(new_duration))\n return new_duration\n\n return 1"
},
{
"identifier": "get_event_by_id",
"path": "nostr_dvm/utils/nostr_utils.py",
"snippet": "def get_event_by_id(event_id: str, client: Client, config=None) -> Event | None:\n split = event_id.split(\":\")\n if len(split) == 3:\n pk = PublicKey.from_hex(split[1])\n id_filter = Filter().author(pk).custom_tag(Alphabet.D, [split[2]])\n events = client.get_events_of([id_filter], timedelta(seconds=config.RELAY_TIMEOUT))\n else:\n if str(event_id).startswith('note'):\n event_id = EventId.from_bech32(event_id)\n elif str(event_id).startswith(\"nevent\"):\n event_id = Nip19Event.from_bech32(event_id).event_id()\n elif str(event_id).startswith('nostr:note'):\n event_id = EventId.from_nostr_uri(event_id)\n elif str(event_id).startswith(\"nostr:nevent\"):\n event_id = Nip19Event.from_nostr_uri(event_id).event_id()\n\n else:\n event_id = EventId.from_hex(event_id)\n\n id_filter = Filter().id(event_id).limit(1)\n events = client.get_events_of([id_filter], timedelta(seconds=config.RELAY_TIMEOUT))\n if len(events) > 0:\n\n return events[0]\n else:\n return None"
},
{
"identifier": "get_referenced_event_by_id",
"path": "nostr_dvm/utils/nostr_utils.py",
"snippet": "def get_referenced_event_by_id(event_id, client, dvm_config, kinds) -> Event | None:\n if kinds is None:\n kinds = []\n if str(event_id).startswith('note'):\n event_id = EventId.from_bech32(event_id)\n elif str(event_id).startswith(\"nevent\"):\n event_id = Nip19Event.from_bech32(event_id).event_id()\n elif str(event_id).startswith('nostr:note'):\n event_id = EventId.from_nostr_uri(event_id)\n elif str(event_id).startswith(\"nostr:nevent\"):\n event_id = Nip19Event.from_nostr_uri(event_id).event_id()\n else:\n event_id = EventId.from_hex(event_id)\n\n if len(kinds) > 0:\n job_id_filter = Filter().kinds(kinds).event(event_id).limit(1)\n else:\n job_id_filter = Filter().event(event_id).limit(1)\n\n events = client.get_events_of([job_id_filter], timedelta(seconds=dvm_config.RELAY_TIMEOUT))\n\n if len(events) > 0:\n return events[0]\n else:\n return None"
},
{
"identifier": "send_event",
"path": "nostr_dvm/utils/nostr_utils.py",
"snippet": "def send_event(event: Event, client: Client, dvm_config) -> EventId:\n try:\n relays = []\n\n for tag in event.tags():\n if tag.as_vec()[0] == 'relays':\n for index, param in enumerate(tag.as_vec()):\n if index != 0:\n relays.append(tag.as_vec()[index])\n\n for relay in relays:\n if relay not in dvm_config.RELAY_LIST:\n client.add_relay(relay)\n\n event_id = client.send_event(event)\n\n for relay in relays:\n if relay not in dvm_config.RELAY_LIST:\n client.remove_relay(relay)\n\n return event_id\n except Exception as e:\n print(e)"
},
{
"identifier": "check_and_decrypt_tags",
"path": "nostr_dvm/utils/nostr_utils.py",
"snippet": "def check_and_decrypt_tags(event, dvm_config):\n try:\n\n is_encrypted = False\n p = \"\"\n for tag in event.tags():\n if tag.as_vec()[0] == 'encrypted':\n is_encrypted = True\n elif tag.as_vec()[0] == 'p':\n p = tag.as_vec()[1]\n\n if is_encrypted:\n if p != dvm_config.PUBLIC_KEY:\n print(\"[\" + dvm_config.NIP89.NAME + \"] Task encrypted and not addressed to this DVM, \"\n \"skipping..\")\n return None\n\n elif p == dvm_config.PUBLIC_KEY:\n tags_str = nip04_decrypt(Keys.from_sk_str(dvm_config.PRIVATE_KEY).secret_key(),\n event.pubkey(), event.content())\n params = json.loads(tags_str)\n params.append(Tag.parse([\"p\", p]).as_vec())\n params.append(Tag.parse([\"encrypted\"]).as_vec())\n event_as_json = json.loads(event.as_json())\n event_as_json['tags'] = params\n event_as_json['content'] = \"\"\n event = Event.from_json(json.dumps(event_as_json))\n except Exception as e:\n print(e)\n\n return event"
},
{
"identifier": "build_status_reaction",
"path": "nostr_dvm/utils/output_utils.py",
"snippet": "def build_status_reaction(status, task, amount, content):\n alt_description = \"This is a reaction to a NIP90 DVM AI task. \"\n\n if status == \"processing\":\n alt_description = \"NIP90 DVM AI task \" + task + \" started processing. \"\n reaction = alt_description + emoji.emojize(\":thumbs_up:\")\n elif status == \"success\":\n alt_description = \"NIP90 DVM AI task \" + task + \" finished successfully. \"\n reaction = alt_description + emoji.emojize(\":call_me_hand:\")\n elif status == \"chain-scheduled\":\n alt_description = \"NIP90 DVM AI task \" + task + \" Chain Task scheduled\"\n reaction = alt_description + emoji.emojize(\":thumbs_up:\")\n elif status == \"error\":\n alt_description = \"NIP90 DVM AI task \" + task + \" had an error. \"\n if content is None:\n reaction = alt_description + emoji.emojize(\":thumbs_down:\")\n else:\n reaction = alt_description + emoji.emojize(\":thumbs_down:\") + \" \" + content\n\n elif status == \"payment-required\":\n alt_description = \"NIP90 DVM AI task \" + task + \" requires payment of min \" + str(\n amount) + \" Sats. \"\n reaction = alt_description + emoji.emojize(\":orange_heart:\")\n\n elif status == \"payment-rejected\":\n alt_description = \"NIP90 DVM AI task \" + task + \" payment is below required amount of \" + str(\n amount) + \" Sats. \"\n reaction = alt_description + emoji.emojize(\":thumbs_down:\")\n elif status == \"user-blocked-from-service\":\n alt_description = \"NIP90 DVM AI task \" + task + \" can't be performed. User has been blocked from Service. \"\n reaction = alt_description + emoji.emojize(\":thumbs_down:\")\n else:\n reaction = emoji.emojize(\":thumbs_down:\")\n\n return alt_description, reaction"
},
{
"identifier": "check_bolt11_ln_bits_is_paid",
"path": "nostr_dvm/utils/zap_utils.py",
"snippet": "def check_bolt11_ln_bits_is_paid(payment_hash: str, config):\n url = config.LNBITS_URL + \"/api/v1/payments/\" + payment_hash\n headers = {'X-API-Key': config.LNBITS_INVOICE_KEY, 'Content-Type': 'application/json', 'charset': 'UTF-8'}\n try:\n res = requests.get(url, headers=headers, proxies=proxies)\n obj = json.loads(res.text)\n if obj.get(\"paid\"):\n return obj[\"paid\"]\n else:\n return False\n except Exception as e:\n return None"
},
{
"identifier": "create_bolt11_ln_bits",
"path": "nostr_dvm/utils/zap_utils.py",
"snippet": "def create_bolt11_ln_bits(sats: int, config) -> (str, str):\n if config.LNBITS_URL == \"\":\n return None, None\n url = config.LNBITS_URL + \"/api/v1/payments\"\n data = {'out': False, 'amount': sats, 'memo': \"Nostr-DVM \" + config.NIP89.NAME}\n headers = {'X-API-Key': config.LNBITS_INVOICE_KEY, 'Content-Type': 'application/json', 'charset': 'UTF-8'}\n try:\n res = requests.post(url, json=data, headers=headers)\n obj = json.loads(res.text)\n if obj.get(\"payment_request\") and obj.get(\"payment_hash\"):\n return obj[\"payment_request\"], obj[\"payment_hash\"] #\n else:\n print(\"LNBITS: \" + res.text)\n return None, None\n except Exception as e:\n print(\"LNBITS: \" + str(e))\n return None, None"
},
{
"identifier": "parse_zap_event_tags",
"path": "nostr_dvm/utils/zap_utils.py",
"snippet": "def parse_zap_event_tags(zap_event, keys, name, client, config):\n zapped_event = None\n invoice_amount = 0\n anon = False\n message = \"\"\n sender = zap_event.pubkey()\n for tag in zap_event.tags():\n if tag.as_vec()[0] == 'bolt11':\n invoice_amount = parse_amount_from_bolt11_invoice(tag.as_vec()[1])\n elif tag.as_vec()[0] == 'e':\n zapped_event = get_event_by_id(tag.as_vec()[1], client=client, config=config)\n zapped_event = check_and_decrypt_own_tags(zapped_event, config)\n elif tag.as_vec()[0] == 'p':\n p_tag = tag.as_vec()[1]\n elif tag.as_vec()[0] == 'description':\n zap_request_event = Event.from_json(tag.as_vec()[1])\n sender = check_for_zapplepay(zap_request_event.pubkey().to_hex(),\n zap_request_event.content())\n for z_tag in zap_request_event.tags():\n if z_tag.as_vec()[0] == 'anon':\n if len(z_tag.as_vec()) > 1:\n # print(\"[\" + name + \"] Private Zap received.\")\n decrypted_content = decrypt_private_zap_message(z_tag.as_vec()[1],\n keys.secret_key(),\n zap_request_event.pubkey())\n decrypted_private_event = Event.from_json(decrypted_content)\n if decrypted_private_event.kind() == 9733:\n sender = decrypted_private_event.pubkey().to_hex()\n message = decrypted_private_event.content()\n # if message != \"\":\n # print(\"Zap Message: \" + message)\n else:\n anon = True\n print(\n \"[\" + name + \"] Anonymous Zap received. Unlucky, I don't know from whom, and never will\")\n\n return invoice_amount, zapped_event, sender, message, anon"
},
{
"identifier": "parse_amount_from_bolt11_invoice",
"path": "nostr_dvm/utils/zap_utils.py",
"snippet": "def parse_amount_from_bolt11_invoice(bolt11_invoice: str) -> int:\n def get_index_of_first_letter(ip):\n index = 0\n for c in ip:\n if c.isalpha():\n return index\n else:\n index = index + 1\n return len(ip)\n\n remaining_invoice = bolt11_invoice[4:]\n index = get_index_of_first_letter(remaining_invoice)\n identifier = remaining_invoice[index]\n number_string = remaining_invoice[:index]\n number = float(number_string)\n if identifier == 'm':\n number = number * 100000000 * 0.001\n elif identifier == 'u':\n number = number * 100000000 * 0.000001\n elif identifier == 'n':\n number = number * 100000000 * 0.000000001\n elif identifier == 'p':\n number = number * 100000000 * 0.000000000001\n\n return int(number)"
},
{
"identifier": "zaprequest",
"path": "nostr_dvm/utils/zap_utils.py",
"snippet": "def zaprequest(lud16: str, amount: int, content, zapped_event, zapped_user, keys, relay_list, zaptype=\"public\"):\n if lud16.startswith(\"LNURL\") or lud16.startswith(\"lnurl\"):\n url = lnurl.decode(lud16)\n elif '@' in lud16: # LNaddress\n url = 'https://' + str(lud16).split('@')[1] + '/.well-known/lnurlp/' + str(lud16).split('@')[0]\n else: # No lud16 set or format invalid\n return None\n try:\n response = requests.get(url)\n ob = json.loads(response.content)\n callback = ob[\"callback\"]\n encoded_lnurl = lnurl.encode(url)\n amount_tag = Tag.parse(['amount', str(amount * 1000)])\n relays_tag = Tag.parse(['relays', str(relay_list)])\n lnurl_tag = Tag.parse(['lnurl', encoded_lnurl])\n if zapped_event is not None:\n p_tag = Tag.parse(['p', zapped_event.pubkey().to_hex()])\n e_tag = Tag.parse(['e', zapped_event.id().to_hex()])\n tags = [amount_tag, relays_tag, p_tag, e_tag, lnurl_tag]\n else:\n p_tag = Tag.parse(['p', zapped_user.to_hex()])\n tags = [amount_tag, relays_tag, p_tag, lnurl_tag]\n\n\n if zaptype == \"private\":\n key_str = keys.secret_key().to_hex() + zapped_event.id().to_hex() + str(zapped_event.created_at().as_secs())\n encryption_key = sha256(key_str.encode('utf-8')).hexdigest()\n\n zap_request = EventBuilder(9733, content,\n [p_tag, e_tag]).to_event(keys).as_json()\n keys = Keys.from_sk_str(encryption_key)\n encrypted_content = enrypt_private_zap_message(zap_request, keys.secret_key(), zapped_event.pubkey())\n anon_tag = Tag.parse(['anon', encrypted_content])\n tags.append(anon_tag)\n content = \"\"\n\n zap_request = EventBuilder(9734, content,\n tags).to_event(keys).as_json()\n\n response = requests.get(callback + \"?amount=\" + str(int(amount) * 1000) + \"&nostr=\" + urllib.parse.quote_plus(\n zap_request) + \"&lnurl=\" + encoded_lnurl)\n ob = json.loads(response.content)\n return ob[\"pr\"]\n\n except Exception as e:\n print(\"ZAP REQUEST: \" + e)\n return None"
},
{
"identifier": "pay_bolt11_ln_bits",
"path": "nostr_dvm/utils/zap_utils.py",
"snippet": "def pay_bolt11_ln_bits(bolt11: str, config):\n url = config.LNBITS_URL + \"/api/v1/payments\"\n data = {'out': True, 'bolt11': bolt11}\n headers = {'X-API-Key': config.LNBITS_ADMIN_KEY, 'Content-Type': 'application/json', 'charset': 'UTF-8'}\n try:\n res = requests.post(url, json=data, headers=headers)\n obj = json.loads(res.text)\n if obj.get(\"payment_hash\"):\n return obj[\"payment_hash\"]\n else:\n return \"Error\"\n except Exception as e:\n print(\"LNBITS: \" + str(e))\n return \"Error\""
},
{
"identifier": "create_bolt11_lud16",
"path": "nostr_dvm/utils/zap_utils.py",
"snippet": "def create_bolt11_lud16(lud16, amount):\n if lud16.startswith(\"LNURL\") or lud16.startswith(\"lnurl\"):\n url = lnurl.decode(lud16)\n elif '@' in lud16: # LNaddress\n url = 'https://' + str(lud16).split('@')[1] + '/.well-known/lnurlp/' + str(lud16).split('@')[0]\n else: # No lud16 set or format invalid\n return None\n try:\n print(url)\n response = requests.get(url)\n ob = json.loads(response.content)\n callback = ob[\"callback\"]\n response = requests.get(callback + \"?amount=\" + str(int(amount) * 1000))\n ob = json.loads(response.content)\n return ob[\"pr\"]\n except Exception as e:\n print(\"LUD16: \" + e)\n return None"
},
{
"identifier": "redeem_cashu",
"path": "nostr_dvm/utils/cashu_utils.py",
"snippet": "def redeem_cashu(cashu, config, client, required_amount=0, update_self=False) -> (bool, str, int, int):\n proofs, mint, total_amount, message = parse_cashu(cashu)\n if message is not None:\n return False, message, 0, 0\n\n estimated_fees = max(int(total_amount * 0.02), 3)\n estimated_redeem_invoice_amount = total_amount - estimated_fees\n\n # Not sure if this the best way to go, we first create an invoice that we send to the mint, we catch the fees\n # for that invoice, and create another invoice with the amount without fees to melt.\n if config.LNBITS_INVOICE_KEY != \"\":\n invoice, paymenthash = create_bolt11_ln_bits(estimated_redeem_invoice_amount, config)\n else:\n\n user = get_or_add_user(db=config.DB, npub=config.PUBLIC_KEY,\n client=client, config=config, update=update_self)\n invoice = create_bolt11_lud16(user.lud16, estimated_redeem_invoice_amount)\n print(invoice)\n if invoice is None:\n return False, \"couldn't create invoice\", 0, 0\n\n url = mint + \"/checkfees\" # Melt cashu tokens at Mint\n json_object = {\"pr\": invoice}\n headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n request_body = json.dumps(json_object).encode('utf-8')\n request = requests.post(url, data=request_body, headers=headers)\n tree = json.loads(request.text)\n fees = tree[\"fee\"]\n print(\"Fees on this mint are \" + str(fees) + \" Sats\")\n redeem_invoice_amount = total_amount -fees\n if redeem_invoice_amount < required_amount:\n err = (\"Token value (Payment: \" + str(total_amount) + \" Sats. Fees: \" +\n str(fees) + \" Sats) below required amount of \" + str(required_amount)\n + \" Sats. Cashu token has not been claimed.\")\n print(\"[\" + config.NIP89.NAME + \"] \" + err)\n return False, err, 0, 0\n\n if config.LNBITS_INVOICE_KEY != \"\":\n invoice, paymenthash = create_bolt11_ln_bits(redeem_invoice_amount, config)\n else:\n\n user = get_or_add_user(db=config.DB, npub=config.PUBLIC_KEY,\n client=client, config=config, update=update_self)\n invoice = create_bolt11_lud16(user.lud16, redeem_invoice_amount)\n print(invoice)\n\n try:\n url = mint + \"/melt\" # Melt cashu tokens at Mint\n json_object = {\"proofs\": proofs, \"pr\": invoice}\n headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n request_body = json.dumps(json_object).encode('utf-8')\n request = requests.post(url, data=request_body, headers=headers)\n tree = json.loads(request.text)\n print(request.text)\n is_paid = tree[\"paid\"] if tree.get(\"paid\") else False\n print(is_paid)\n if is_paid:\n print(\"cashu token redeemed\")\n return True, \"success\", redeem_invoice_amount, fees\n else:\n msg = tree.get(\"detail\").split('.')[0].strip() if tree.get(\"detail\") else None\n print(msg)\n return False, msg, redeem_invoice_amount, fees\n except Exception as e:\n print(e)\n\n return False, \"\", redeem_invoice_amount, fees"
}
] | import json
import os
import subprocess
import time
from datetime import timedelta
from sys import platform
from nostr_sdk import PublicKey, Keys, Client, Tag, Event, EventBuilder, Filter, HandleNotification, Timestamp, \
    init_logger, LogLevel, Options, nip04_encrypt, ClientSigner
from nostr_dvm.utils.definitions import EventDefinitions, RequiredJobToWatch, JobToWatch
from nostr_dvm.utils.dvmconfig import DVMConfig
from nostr_dvm.utils.admin_utils import admin_make_database_updates, AdminConfig
from nostr_dvm.utils.backend_utils import get_amount_per_task, check_task_is_supported, get_task
from nostr_dvm.utils.database_utils import create_sql_table, get_or_add_user, update_user_balance, update_sql_table
from nostr_dvm.utils.mediasource_utils import input_data_file_duration
from nostr_dvm.utils.nostr_utils import get_event_by_id, get_referenced_event_by_id, send_event, check_and_decrypt_tags
from nostr_dvm.utils.output_utils import build_status_reaction
from nostr_dvm.utils.zap_utils import check_bolt11_ln_bits_is_paid, create_bolt11_ln_bits, parse_zap_event_tags, \
    parse_amount_from_bolt11_invoice, zaprequest, pay_bolt11_ln_bits, create_bolt11_lud16
from nostr_dvm.utils.cashu_utils import redeem_cashu | 10,396 |
use_logger = False
if use_logger:
    init_logger(LogLevel.DEBUG)
class DVM:
    dvm_config: DVMConfig
    admin_config: AdminConfig
    keys: Keys
    client: Client
    job_list: list
    jobs_on_hold_list: list
    def __init__(self, dvm_config, admin_config=None):
        self.dvm_config = dvm_config
        self.admin_config = admin_config
        self.keys = Keys.from_sk_str(dvm_config.PRIVATE_KEY)
        wait_for_send = True
        skip_disconnected_relays = True
        opts = (Options().wait_for_send(wait_for_send).send_timeout(timedelta(seconds=self.dvm_config.RELAY_TIMEOUT))
                .skip_disconnected_relays(skip_disconnected_relays))
        signer = ClientSigner.keys(self.keys)
        self.client = Client.with_opts(signer, opts)
        self.job_list = []
        self.jobs_on_hold_list = []
        pk = self.keys.public_key()
        print("Nostr DVM public key: " + str(pk.to_bech32()) + " Hex: " + str(pk.to_hex()) + " Supported DVM tasks: " +
              ', '.join(p.NAME + ":" + p.TASK for p in self.dvm_config.SUPPORTED_DVMS) + "\n")
        for relay in self.dvm_config.RELAY_LIST:
            self.client.add_relay(relay)
        self.client.connect()
        zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_ZAP]).since(Timestamp.now())
        kinds = [EventDefinitions.KIND_NIP90_GENERIC]
        for dvm in self.dvm_config.SUPPORTED_DVMS:
            if dvm.KIND not in kinds:
                kinds.append(dvm.KIND)
        dvm_filter = (Filter().kinds(kinds).since(Timestamp.now()))
        self.client.subscribe([dvm_filter, zap_filter])
        create_sql_table(self.dvm_config.DB)
        admin_make_database_updates(adminconfig=self.admin_config, dvmconfig=self.dvm_config, client=self.client)
        class NotificationHandler(HandleNotification):
            client = self.client
            dvm_config = self.dvm_config
            keys = self.keys
            def handle(self, relay_url, nostr_event):
                if EventDefinitions.KIND_NIP90_EXTRACT_TEXT <= nostr_event.kind() <= EventDefinitions.KIND_NIP90_GENERIC:
                    handle_nip90_job_event(nostr_event)
                elif nostr_event.kind() == EventDefinitions.KIND_ZAP:
                    handle_zap(nostr_event)
            def handle_msg(self, relay_url, msg):
                return
        def handle_nip90_job_event(nip90_event):
            nip90_event = check_and_decrypt_tags(nip90_event, self.dvm_config)
            if nip90_event is None:
                return
            user = get_or_add_user(self.dvm_config.DB, nip90_event.pubkey().to_hex(), client=self.client,
                                   config=self.dvm_config)
            cashu = ""
            p_tag_str = ""
            for tag in nip90_event.tags():
                if tag.as_vec()[0] == "cashu":
                    cashu = tag.as_vec()[1]
                elif tag.as_vec()[0] == "p":
                    p_tag_str = tag.as_vec()[1]
|
use_logger = False
if use_logger:
    init_logger(LogLevel.DEBUG)
class DVM:
    dvm_config: DVMConfig
    admin_config: AdminConfig
    keys: Keys
    client: Client
    job_list: list
    jobs_on_hold_list: list
    def __init__(self, dvm_config, admin_config=None):
        self.dvm_config = dvm_config
        self.admin_config = admin_config
        self.keys = Keys.from_sk_str(dvm_config.PRIVATE_KEY)
        wait_for_send = True
        skip_disconnected_relays = True
        opts = (Options().wait_for_send(wait_for_send).send_timeout(timedelta(seconds=self.dvm_config.RELAY_TIMEOUT))
                .skip_disconnected_relays(skip_disconnected_relays))
        signer = ClientSigner.keys(self.keys)
        self.client = Client.with_opts(signer, opts)
        self.job_list = []
        self.jobs_on_hold_list = []
        pk = self.keys.public_key()
        print("Nostr DVM public key: " + str(pk.to_bech32()) + " Hex: " + str(pk.to_hex()) + " Supported DVM tasks: " +
              ', '.join(p.NAME + ":" + p.TASK for p in self.dvm_config.SUPPORTED_DVMS) + "\n")
        for relay in self.dvm_config.RELAY_LIST:
            self.client.add_relay(relay)
        self.client.connect()
        zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_ZAP]).since(Timestamp.now())
        kinds = [EventDefinitions.KIND_NIP90_GENERIC]
        for dvm in self.dvm_config.SUPPORTED_DVMS:
            if dvm.KIND not in kinds:
                kinds.append(dvm.KIND)
        dvm_filter = (Filter().kinds(kinds).since(Timestamp.now()))
        self.client.subscribe([dvm_filter, zap_filter])
        create_sql_table(self.dvm_config.DB)
        admin_make_database_updates(adminconfig=self.admin_config, dvmconfig=self.dvm_config, client=self.client)
        class NotificationHandler(HandleNotification):
            client = self.client
            dvm_config = self.dvm_config
            keys = self.keys
            def handle(self, relay_url, nostr_event):
                if EventDefinitions.KIND_NIP90_EXTRACT_TEXT <= nostr_event.kind() <= EventDefinitions.KIND_NIP90_GENERIC:
                    handle_nip90_job_event(nostr_event)
                elif nostr_event.kind() == EventDefinitions.KIND_ZAP:
                    handle_zap(nostr_event)
            def handle_msg(self, relay_url, msg):
                return
        def handle_nip90_job_event(nip90_event):
            nip90_event = check_and_decrypt_tags(nip90_event, self.dvm_config)
            if nip90_event is None:
                return
            user = get_or_add_user(self.dvm_config.DB, nip90_event.pubkey().to_hex(), client=self.client,
                                   config=self.dvm_config)
            cashu = ""
            p_tag_str = ""
            for tag in nip90_event.tags():
                if tag.as_vec()[0] == "cashu":
                    cashu = tag.as_vec()[1]
                elif tag.as_vec()[0] == "p":
                    p_tag_str = tag.as_vec()[1] | task_supported, task = check_task_is_supported(nip90_event, client=self.client, | 7 | 2023-11-17 18:32:56+00:00 | 12k
embrake/Aquilify | aquilify/wrappers/request.py | [
{
"identifier": "AwaitableOrContextManager",
"path": "aquilify/utils/_utils.py",
"snippet": "class AwaitableOrContextManager(Protocol[T_co]):\n def __await__(self) -> typing.Generator[typing.Any, None, T_co]:\n ... # pragma: no cover\n\n async def __aenter__(self) -> T_co:\n ... # pragma: no cover\n\n async def __aexit__(\n self,\n __exc_type: typing.Optional[typing.Type[BaseException]],\n __exc_value: typing.Optional[BaseException],\n __traceback: typing.Optional[TracebackType],\n ) -> typing.Union[bool, None]:\n ... # pragma: no cover"
},
{
"identifier": "AwaitableOrContextManagerWrapper",
"path": "aquilify/utils/_utils.py",
"snippet": "class AwaitableOrContextManagerWrapper(typing.Generic[SupportsAsyncCloseType]):\n __slots__ = (\"aw\", \"entered\")\n\n def __init__(self, aw: typing.Awaitable[SupportsAsyncCloseType]) -> None:\n self.aw = aw\n\n def __await__(self) -> typing.Generator[typing.Any, None, SupportsAsyncCloseType]:\n return self.aw.__await__()\n\n async def __aenter__(self) -> SupportsAsyncCloseType:\n self.entered = await self.aw\n return self.entered\n\n async def __aexit__(self, *args: typing.Any) -> typing.Union[None, bool]:\n await self.entered.close()\n return None"
},
{
"identifier": "URL",
"path": "aquilify/datastructure/core.py",
"snippet": "class URL:\n def __init__(\n self,\n url: str = \"\",\n scope: typing.Optional[Scope] = None,\n **components: typing.Any,\n ) -> None:\n if scope is not None:\n assert not url, 'Cannot set both \"url\" and \"scope\".'\n assert not components, 'Cannot set both \"scope\" and \"**components\".'\n scheme = scope.get(\"scheme\", \"http\")\n server = scope.get(\"server\", None)\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n query_string = scope.get(\"query_string\", b\"\")\n\n host_header = None\n for key, value in scope[\"headers\"]:\n if key == b\"host\":\n host_header = value.decode(\"latin-1\")\n break\n\n if host_header is not None:\n url = f\"{scheme}://{host_header}{path}\"\n elif server is None:\n url = path\n else:\n host, port = server\n default_port = {\"http\": 80, \"https\": 443, \"ws\": 80, \"wss\": 443}[scheme]\n if port == default_port:\n url = f\"{scheme}://{host}{path}\"\n else:\n url = f\"{scheme}://{host}:{port}{path}\"\n\n if query_string:\n url += \"?\" + query_string.decode()\n elif components:\n assert not url, 'Cannot set both \"url\" and \"**components\".'\n url = URL(\"\").replace(**components).components.geturl()\n\n self._url = url\n\n @property\n def components(self) -> SplitResult:\n if not hasattr(self, \"_components\"):\n self._components = urlsplit(self._url)\n return self._components\n\n @property\n def scheme(self) -> str:\n return self.components.scheme\n\n @property\n def netloc(self) -> str:\n return self.components.netloc\n\n @property\n def path(self) -> str:\n return self.components.path\n\n @property\n def query(self) -> str:\n return self.components.query\n\n @property\n def fragment(self) -> str:\n return self.components.fragment\n\n @property\n def username(self) -> typing.Union[None, str]:\n return self.components.username\n\n @property\n def password(self) -> typing.Union[None, str]:\n return self.components.password\n\n @property\n def hostname(self) -> typing.Union[None, str]:\n return self.components.hostname\n\n @property\n def port(self) -> typing.Optional[int]:\n return self.components.port\n\n @property\n def is_secure(self) -> bool:\n return self.scheme in (\"https\", \"wss\")\n\n def replace(self, **kwargs: typing.Any) -> \"URL\":\n if (\n \"username\" in kwargs\n or \"password\" in kwargs\n or \"hostname\" in kwargs\n or \"port\" in kwargs\n ):\n hostname = kwargs.pop(\"hostname\", None)\n port = kwargs.pop(\"port\", self.port)\n username = kwargs.pop(\"username\", self.username)\n password = kwargs.pop(\"password\", self.password)\n\n if hostname is None:\n netloc = self.netloc\n _, _, hostname = netloc.rpartition(\"@\")\n\n if hostname[-1] != \"]\":\n hostname = hostname.rsplit(\":\", 1)[0]\n\n netloc = hostname\n if port is not None:\n netloc += f\":{port}\"\n if username is not None:\n userpass = username\n if password is not None:\n userpass += f\":{password}\"\n netloc = f\"{userpass}@{netloc}\"\n\n kwargs[\"netloc\"] = netloc\n\n components = self.components._replace(**kwargs)\n return self.__class__(components.geturl())\n\n def include_query_params(self, **kwargs: typing.Any) -> \"URL\":\n params = MultiDict(parse_qsl(self.query, keep_blank_values=True))\n params.update({str(key): str(value) for key, value in kwargs.items()})\n query = urlencode(params.multi_items())\n return self.replace(query=query)\n\n def replace_query_params(self, **kwargs: typing.Any) -> \"URL\":\n query = urlencode([(str(key), str(value)) for key, value in kwargs.items()])\n return self.replace(query=query)\n\n def 
remove_query_params(\n self, keys: typing.Union[str, typing.Sequence[str]]\n ) -> \"URL\":\n if isinstance(keys, str):\n keys = [keys]\n params = MultiDict(parse_qsl(self.query, keep_blank_values=True))\n for key in keys:\n params.pop(key, None)\n query = urlencode(params.multi_items())\n return self.replace(query=query)\n\n def __eq__(self, other: typing.Any) -> bool:\n return str(self) == str(other)\n\n def __str__(self) -> str:\n return self._url\n\n def __repr__(self) -> str:\n url = str(self)\n if self.password:\n url = str(self.replace(password=\"********\"))\n return f\"{self.__class__.__name__}({repr(url)})\""
},
{
"identifier": "Address",
"path": "aquilify/datastructure/core.py",
"snippet": "class Address(typing.NamedTuple):\n host: str\n port: int"
},
{
"identifier": "FormData",
"path": "aquilify/datastructure/core.py",
"snippet": "class FormData(ImmutableMultiDict[str, typing.Union[UploadFile, str]]):\n def __init__(\n self,\n *args: typing.Union[\n \"FormData\",\n typing.Mapping[str, typing.Union[str, UploadFile]],\n typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]],\n ],\n **kwargs: typing.Union[str, UploadFile],\n ) -> None:\n super().__init__(*args, **kwargs)\n\n async def close(self) -> None:\n for key, value in self.multi_items():\n if isinstance(value, UploadFile):\n await value.close()"
},
{
"identifier": "Headers",
"path": "aquilify/datastructure/core.py",
"snippet": "class Headers(typing.Mapping[str, str]):\n def __init__(\n self,\n headers: typing.Optional[typing.Mapping[str, str]] = None,\n raw: typing.Optional[typing.List[typing.Tuple[bytes, bytes]]] = None,\n scope: typing.Optional[typing.MutableMapping[str, typing.Any]] = None,\n ) -> None:\n self._list: typing.List[typing.Tuple[bytes, bytes]] = []\n if headers is not None:\n assert raw is None, 'Cannot set both \"headers\" and \"raw\".'\n assert scope is None, 'Cannot set both \"headers\" and \"scope\".'\n self._list = [\n (key.lower().encode(\"latin-1\"), value.encode(\"latin-1\"))\n for key, value in headers.items()\n ]\n elif raw is not None:\n assert scope is None, 'Cannot set both \"raw\" and \"scope\".'\n self._list = raw\n elif scope is not None:\n self._list = scope[\"headers\"] = list(scope[\"headers\"])\n\n @property\n def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]:\n return list(self._list)\n\n def keys(self) -> typing.List[str]: # type: ignore[override]\n return [key.decode(\"latin-1\") for key, value in self._list]\n\n def values(self) -> typing.List[str]: # type: ignore[override]\n return [value.decode(\"latin-1\") for key, value in self._list]\n\n def items(self) -> typing.List[typing.Tuple[str, str]]: # type: ignore[override]\n return [\n (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for key, value in self._list\n ]\n\n def getlist(self, key: str) -> typing.List[str]:\n get_header_key = key.lower().encode(\"latin-1\")\n return [\n item_value.decode(\"latin-1\")\n for item_key, item_value in self._list\n if item_key == get_header_key\n ]\n\n def mutablecopy(self) -> \"MutableHeaders\":\n return MutableHeaders(raw=self._list[:])\n\n def __getitem__(self, key: str) -> str:\n get_header_key = key.lower().encode(\"latin-1\")\n for header_key, header_value in self._list:\n if header_key == get_header_key:\n return header_value.decode(\"latin-1\")\n raise KeyError(key)\n\n def __contains__(self, key: typing.Any) -> bool:\n get_header_key = key.lower().encode(\"latin-1\")\n for header_key, header_value in self._list:\n if header_key == get_header_key:\n return True\n return False\n\n def __iter__(self) -> typing.Iterator[typing.Any]:\n return iter(self.keys())\n\n def __len__(self) -> int:\n return len(self._list)\n\n def __eq__(self, other: typing.Any) -> bool:\n if not isinstance(other, Headers):\n return False\n return sorted(self._list) == sorted(other._list)\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n as_dict = dict(self.items())\n if len(as_dict) == len(self):\n return f\"{class_name}({as_dict!r})\"\n return f\"{class_name}(raw={self.raw!r})\""
},
{
"identifier": "State",
"path": "aquilify/datastructure/core.py",
"snippet": "class State:\n _state: typing.Dict[str, typing.Any]\n\n def __init__(self, state: typing.Optional[typing.Dict[str, typing.Any]] = None):\n if state is None:\n state = {}\n super().__setattr__(\"_state\", state)\n\n def __setattr__(self, key: typing.Any, value: typing.Any) -> None:\n self._state[key] = value\n\n def __getattr__(self, key: typing.Any) -> typing.Any:\n try:\n return self._state[key]\n except KeyError:\n message = \"'{}' object has no attribute '{}'\"\n raise AttributeError(message.format(self.__class__.__name__, key))\n\n def __delattr__(self, key: typing.Any) -> None:\n del self._state[key]"
},
{
"identifier": "HTTPException",
"path": "aquilify/exception/http_exception.py",
"snippet": "class HTTPException(Exception):\n def __init__(\n self,\n status_code: int,\n detail: typing.Optional[str] = None,\n headers: typing.Optional[dict] = None,\n ) -> None:\n if detail is None:\n detail = http.HTTPStatus(status_code).phrase\n self.status_code = status_code\n self.detail = detail\n self.headers = headers\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f\"{class_name}(status_code={self.status_code!r}, detail={self.detail!r})\""
},
{
"identifier": "FormParser",
"path": "aquilify/datastructure/formparser.py",
"snippet": "class FormParser:\n def __init__(\n self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.messages: typing.List[typing.Tuple[FormMessage, bytes]] = []\n\n def on_field_start(self) -> None:\n message = (FormMessage.FIELD_START, b\"\")\n self.messages.append(message)\n\n def on_field_name(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_NAME, data[start:end])\n self.messages.append(message)\n\n def on_field_data(self, data: bytes, start: int, end: int) -> None:\n message = (FormMessage.FIELD_DATA, data[start:end])\n self.messages.append(message)\n\n def on_field_end(self) -> None:\n message = (FormMessage.FIELD_END, b\"\")\n self.messages.append(message)\n\n def on_end(self) -> None:\n message = (FormMessage.END, b\"\")\n self.messages.append(message)\n\n async def parse(self) -> FormData:\n # Callbacks dictionary.\n callbacks = {\n \"on_field_start\": self.on_field_start,\n \"on_field_name\": self.on_field_name,\n \"on_field_data\": self.on_field_data,\n \"on_field_end\": self.on_field_end,\n \"on_end\": self.on_end,\n }\n\n # Create the parser.\n parser = multipart.QuerystringParser(callbacks)\n field_name = b\"\"\n field_value = b\"\"\n\n items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = []\n\n # Feed the parser with data from the request.\n async for chunk in self.stream:\n if chunk:\n parser.write(chunk)\n else:\n parser.finalize()\n messages = list(self.messages)\n self.messages.clear()\n for message_type, message_bytes in messages:\n if message_type == FormMessage.FIELD_START:\n field_name = b\"\"\n field_value = b\"\"\n elif message_type == FormMessage.FIELD_NAME:\n field_name += message_bytes\n elif message_type == FormMessage.FIELD_DATA:\n field_value += message_bytes\n elif message_type == FormMessage.FIELD_END:\n name = unquote_plus(field_name.decode(\"latin-1\"))\n value = unquote_plus(field_value.decode(\"latin-1\"))\n items.append((name, value))\n\n return FormData(items)"
},
{
"identifier": "MultiPartException",
"path": "aquilify/datastructure/formparser.py",
"snippet": "class MultiPartException(Exception):\n def __init__(self, message: str) -> None:\n self.message = message"
},
{
"identifier": "MultiPartParser",
"path": "aquilify/datastructure/formparser.py",
"snippet": "class MultiPartParser:\n max_file_size = 1024 * 1024\n\n def __init__(\n self,\n headers: Headers,\n stream: typing.AsyncGenerator[bytes, None],\n *,\n max_files: typing.Union[int, float] = 1000,\n max_fields: typing.Union[int, float] = 1000,\n ) -> None:\n assert (\n multipart is not None\n ), \"The `python-multipart` library must be installed to use form parsing.\"\n self.headers = headers\n self.stream = stream\n self.max_files = max_files\n self.max_fields = max_fields\n self.items: typing.List[typing.Tuple[str, typing.Union[str, UploadFile]]] = []\n self._current_files = 0\n self._current_fields = 0\n self._current_partial_header_name: bytes = b\"\"\n self._current_partial_header_value: bytes = b\"\"\n self._current_part = MultipartPart()\n self._charset = \"\"\n self._file_parts_to_write: typing.List[typing.Tuple[MultipartPart, bytes]] = []\n self._file_parts_to_finish: typing.List[MultipartPart] = []\n self._files_to_close_on_error: typing.List[SpooledTemporaryFile] = []\n\n def on_part_begin(self) -> None:\n self._current_part = MultipartPart()\n\n def on_part_data(self, data: bytes, start: int, end: int) -> None:\n message_bytes = data[start:end]\n if self._current_part.file is None:\n self._current_part.data += message_bytes\n else:\n self._file_parts_to_write.append((self._current_part, message_bytes))\n\n def on_part_end(self) -> None:\n if self._current_part.file is None:\n self.items.append(\n (\n self._current_part.field_name,\n _user_safe_decode(self._current_part.data, self._charset),\n )\n )\n else:\n self._file_parts_to_finish.append(self._current_part)\n self.items.append((self._current_part.field_name, self._current_part.file))\n\n def on_header_field(self, data: bytes, start: int, end: int) -> None:\n self._current_partial_header_name += data[start:end]\n\n def on_header_value(self, data: bytes, start: int, end: int) -> None:\n self._current_partial_header_value += data[start:end]\n\n def on_header_end(self) -> None:\n field = self._current_partial_header_name.lower()\n if field == b\"content-disposition\":\n self._current_part.content_disposition = self._current_partial_header_value\n self._current_part.item_headers.append(\n (field, self._current_partial_header_value)\n )\n self._current_partial_header_name = b\"\"\n self._current_partial_header_value = b\"\"\n\n def on_headers_finished(self) -> None:\n disposition, options = parse_options_header(\n self._current_part.content_disposition\n )\n try:\n self._current_part.field_name = _user_safe_decode(\n options[b\"name\"], self._charset\n )\n except KeyError:\n raise MultiPartException(\n 'The Content-Disposition header field \"name\" must be ' \"provided.\"\n )\n if b\"filename\" in options:\n self._current_files += 1\n if self._current_files > self.max_files:\n raise MultiPartException(\n f\"Too many files. Maximum number of files is {self.max_files}.\"\n )\n filename = _user_safe_decode(options[b\"filename\"], self._charset)\n tempfile = SpooledTemporaryFile(max_size=self.max_file_size)\n self._files_to_close_on_error.append(tempfile)\n self._current_part.file = UploadFile(\n file=tempfile, # type: ignore[arg-type]\n size=0,\n filename=filename,\n headers=Headers(raw=self._current_part.item_headers),\n )\n else:\n self._current_fields += 1\n if self._current_fields > self.max_fields:\n raise MultiPartException(\n f\"Too many fields. 
Maximum number of fields is {self.max_fields}.\"\n )\n self._current_part.file = None\n\n def on_end(self) -> None:\n pass\n\n async def parse(self) -> FormData:\n # Parse the Content-Type header to get the multipart boundary.\n _, params = parse_options_header(self.headers[\"Content-Type\"])\n charset = params.get(b\"charset\", \"utf-8\")\n if type(charset) == bytes:\n charset = charset.decode(\"latin-1\")\n self._charset = charset\n try:\n boundary = params[b\"boundary\"]\n except KeyError:\n raise MultiPartException(\"Missing boundary in multipart.\")\n\n # Callbacks dictionary.\n callbacks = {\n \"on_part_begin\": self.on_part_begin,\n \"on_part_data\": self.on_part_data,\n \"on_part_end\": self.on_part_end,\n \"on_header_field\": self.on_header_field,\n \"on_header_value\": self.on_header_value,\n \"on_header_end\": self.on_header_end,\n \"on_headers_finished\": self.on_headers_finished,\n \"on_end\": self.on_end,\n }\n\n parser = multipart.MultipartParser(boundary, callbacks)\n try:\n async for chunk in self.stream:\n parser.write(chunk)\n for part, data in self._file_parts_to_write:\n assert part.file # for type checkers\n await part.file.write(data)\n for part in self._file_parts_to_finish:\n assert part.file # for type checkers\n await part.file.seek(0)\n self._file_parts_to_write.clear()\n self._file_parts_to_finish.clear()\n except MultiPartException as exc:\n for file in self._files_to_close_on_error:\n file.close()\n raise exc\n\n parser.finalize()\n return FormData(self.items)"
},
{
"identifier": "UserAgentParser",
"path": "aquilify/datastructure/user_agent.py",
"snippet": "class UserAgentParser:\n def __init__(self, user_agent_string: str) -> None:\n self.user_agent_string: str = user_agent_string\n self.browser: str = ''\n self.browser_version: str = ''\n self.browser_engine: str = ''\n self.os: str = ''\n self.os_version: str = ''\n self.device: str = ''\n self.is_mobile: bool = False\n self.language: str = ''\n self.platform: str = ''\n self.is_bot_or_crawler: bool = False\n self.screen_resolution: str = ''\n self.viewport_size: str = ''\n self.js_enabled: bool = False\n self.referer: str = ''\n self.timezone: str = ''\n self._parse_user_agent()\n\n def _parse_user_agent(self) -> None:\n self.browser, self.browser_version = self._get_browser_info()\n self.browser_engine = self._get_browser_engine()\n self.os, self.os_version = self._get_os_info()\n self.device = self._get_device_info()\n self.is_mobile = self._check_mobile()\n self.language = self._get_language()\n self.platform = self._get_platform()\n self.is_bot_or_crawler = self._check_bot_or_crawler()\n self.screen_resolution = self._get_screen_resolution()\n self.viewport_size = self._get_viewport_size()\n self.js_enabled = self._check_javascript_enabled()\n self.referer = self._get_referer()\n self.timezone = self._get_timezone()\n\n def _get_browser_info(self):\n browsers = {\n 'Opera': r'Opera\\/([0-9.]+)',\n 'Firefox': r'Firefox\\/([0-9.]+)',\n 'Edge': r'(?:Edg(?:e)?)\\/([0-9.]+)',\n 'Chrome': r'Chrome\\/([0-9.]+)',\n 'Safari': r'Safari\\/([0-9.]+)',\n 'IE': r'MSIE ([0-9.]+)|rv:([0-9.]+)'\n }\n \n browser_priority = ['Edge', 'Chrome', 'Firefox', 'Safari', 'Opera', 'IE']\n\n for browser in browser_priority:\n if browser in browsers:\n pattern = browsers[browser]\n match = re.search(pattern, self.user_agent_string)\n if match:\n if browser == 'Edge':\n version = match.group(1) or ''\n else:\n version = match.group(1) or match.group(2) or ''\n return browser, version\n \n return 'Unknown', ''\n \n def _get_browser_engine(self):\n browser_engines = {\n 'Blink': 'Blink',\n 'WebKit': 'WebKit',\n 'Gecko': 'Gecko',\n 'Trident': 'Trident'\n }\n for engine, pattern in browser_engines.items():\n if pattern in self.user_agent_string:\n return engine\n return 'Unknown'\n\n def _get_os_info(self):\n operating_systems = {\n 'Windows': r'Windows NT ([0-9.]+)',\n 'Android': r'Android ([0-9.]+)',\n 'Linux': r'Linux',\n 'iOS': r'OS ([0-9_]+) like Mac',\n 'Mac': r'Mac OS X ([0-9_]+)'\n }\n for os, pattern in operating_systems.items():\n match = re.search(pattern, self.user_agent_string)\n if match:\n version = match.group(1).replace('_', '.') if match.group(1) else ''\n return os, version\n return 'Unknown', ''\n\n def _get_device_info(self):\n devices = {\n 'iPhone': r'iPhone(?:\\sSimulator)?',\n 'iPad': r'iPad(?:\\sSimulator)?',\n 'Mobile': r'Mobile',\n 'Tablet': r'Tablet',\n 'Desktop': r'Windows|Macintosh|Linux'\n }\n for device, pattern in devices.items():\n if re.search(pattern, self.user_agent_string):\n return device\n return 'Unknown'\n\n def _check_mobile(self):\n return 'Mobile' in self.user_agent_string\n\n def _get_language(self):\n match = re.search(r'(?<=\\b(?:language=))(.*?)(?=[;|$])', self.user_agent_string)\n return match.group(1) if match else 'Unknown'\n\n def _get_platform(self):\n platforms = {\n 'Windows': 'Windows',\n 'Linux': 'Linux',\n 'Mac': 'Macintosh'\n }\n for platform, pattern in platforms.items():\n if pattern in self.user_agent_string:\n return platform\n return 'Unknown'\n\n def _check_bot_or_crawler(self):\n bot_patterns = [\n 'bot',\n 'crawler',\n 'spider',\n 
'googlebot',\n 'bingbot',\n 'slurp',\n 'duckduckbot',\n 'yandexbot'\n ]\n for bot_pattern in bot_patterns:\n if re.search(bot_pattern, self.user_agent_string, re.IGNORECASE):\n return True\n return False\n\n def _get_screen_resolution(self):\n match = re.search(r'(?<=\\b(?:Screen: ))([0-9]+x[0-9]+)', self.user_agent_string)\n return match.group(1) if match else 'Unknown'\n\n def _get_viewport_size(self):\n match = re.search(r'(?<=\\b(?:Viewport: ))([0-9]+x[0-9]+)', self.user_agent_string)\n return match.group(1) if match else 'Unknown'\n\n def _check_javascript_enabled(self):\n return 'JS' in self.user_agent_string\n\n def _get_referer(self):\n match = re.search(r'(?<=\\b(?:Referer: ))(.*?)(?=[;|$])', self.user_agent_string)\n return match.group(1) if match else 'Unknown'\n\n def _get_timezone(self):\n match = re.search(r'(?<=\\b(?:Timezone: ))(.*?)(?=[;|$])', self.user_agent_string)\n return match.group(1) if match else 'Unknown'\n \n def __str__(self) -> str:\n return str(self.user_agent_string)\n \n def __repr__(self) -> str:\n return f\"UserAgentParser({self.user_agent_string})\"\n\n def to_dict(self) -> dict:\n return {\n 'user_agent_string': self.user_agent_string,\n 'browser': self.browser,\n 'browser_version': self.browser_version,\n 'browser_engine': self.browser_engine,\n 'os': self.os,\n 'os_version': self.os_version,\n 'device': self.device,\n 'is_mobile': self.is_mobile,\n 'language': self.language,\n 'platform': self.platform,\n 'is_bot_or_crawler': self.is_bot_or_crawler,\n 'screen_resolution': self.screen_resolution,\n 'viewport_size': self.viewport_size,\n 'js_enabled': self.js_enabled,\n 'referer': self.referer,\n 'timezone': self.timezone\n }"
},
{
"identifier": "Message",
"path": "aquilify/types.py",
"snippet": "T = typing.TypeVar(\"T\")"
}
] | import json
import typing
import anyio
from http import cookies as http_cookies
from urllib.parse import parse_qs
from ..utils._utils import AwaitableOrContextManager, AwaitableOrContextManagerWrapper
from ..datastructure.core import URL, Address, FormData, Headers, State
from ..exception.http_exception import HTTPException
from ..datastructure.formparser import FormParser, MultiPartException, MultiPartParser
from ..datastructure.user_agent import UserAgentParser
from ..types import Message, Receive, Scope, Send
from multipart.multipart import parse_options_header | 7,281 |
try:
    from multipart.multipart import parse_options_header
except ModuleNotFoundError:
    parse_options_header = None
SERVER_PUSH_HEADERS_TO_COPY = {
    "accept",
    "accept-encoding",
    "accept-language",
    "cache-control",
    "user-agent",
}
def cookie_parser(cookie_string: str) -> typing.Dict[str, str]:
    cookie_dict: typing.Dict[str, str] = {}
    for chunk in cookie_string.split(";"):
        if "=" in chunk:
            key, val = chunk.split("=", 1)
        else:
            key, val = "", chunk
        key, val = key.strip(), val.strip()
        if key or val:
            cookie_dict[key] = http_cookies._unquote(val)
    return cookie_dict
class ClientDisconnect(Exception):
    pass
class HTTPConnection(typing.Mapping[str, typing.Any]):
    def __init__(self, scope: Scope, receive: typing.Optional[Receive] = None) -> None:
        assert scope["type"] in ("http", "websocket")
        self.scope = scope
    def __getitem__(self, key: str) -> typing.Any:
        return self.scope[key]
    def __iter__(self) -> typing.Iterator[str]:
        return iter(self.scope)
    def __len__(self) -> int:
        return len(self.scope)
    __eq__ = object.__eq__
    __hash__ = object.__hash__
    @property
    def app(self) -> typing.Any:
        return self.scope["app"]
    @property
|
try:
    from multipart.multipart import parse_options_header
except ModuleNotFoundError:
    parse_options_header = None
SERVER_PUSH_HEADERS_TO_COPY = {
    "accept",
    "accept-encoding",
    "accept-language",
    "cache-control",
    "user-agent",
}
def cookie_parser(cookie_string: str) -> typing.Dict[str, str]:
    cookie_dict: typing.Dict[str, str] = {}
    for chunk in cookie_string.split(";"):
        if "=" in chunk:
            key, val = chunk.split("=", 1)
        else:
            key, val = "", chunk
        key, val = key.strip(), val.strip()
        if key or val:
            cookie_dict[key] = http_cookies._unquote(val)
    return cookie_dict
class ClientDisconnect(Exception):
    pass
class HTTPConnection(typing.Mapping[str, typing.Any]):
    def __init__(self, scope: Scope, receive: typing.Optional[Receive] = None) -> None:
        assert scope["type"] in ("http", "websocket")
        self.scope = scope
    def __getitem__(self, key: str) -> typing.Any:
        return self.scope[key]
    def __iter__(self) -> typing.Iterator[str]:
        return iter(self.scope)
    def __len__(self) -> int:
        return len(self.scope)
    __eq__ = object.__eq__
    __hash__ = object.__hash__
    @property
    def app(self) -> typing.Any:
        return self.scope["app"]
    @property | def url(self) -> URL: | 2 | 2023-11-16 08:26:02+00:00 | 12k
IBM/oper8 | oper8/x/utils/deps_annotation.py | [
{
"identifier": "DEPS_ANNOTATION",
"path": "oper8/x/utils/constants.py",
"snippet": "DEPS_ANNOTATION = \"oper8.org/dependency-hash\""
},
{
"identifier": "Component",
"path": "oper8/component.py",
"snippet": "class Component(Node, abc.ABC):\n \"\"\"\n This file defines the top-level interface for a \"Component\" in the\n deployment ecosystem. Each Component will ultimately resolve to a Node in\n the deployment execution graph which can be atomically rendered, deployed,\n verified, and if needed reverted.\n \"\"\"\n\n @abstractclassproperty\n def name(self):\n \"\"\"All Components must implement a name class attribute\"\"\"\n\n def __init__(\n self,\n session: Session,\n disabled: bool = False,\n ):\n \"\"\"Construct with the session for this deployment\n\n Args:\n session: Session\n The session that this component will belong to\n disabled: bool\n Whether or not this component is disabled\n \"\"\"\n # Ensure that the name property is defined by accessing it and that\n # namespace is inherited from session.\n self.name # noqa: B018\n self.session_namespace = session.namespace\n self.disabled = disabled\n\n # Initialize Node with name\n super().__init__(self.name)\n\n # Register with the session\n # NOTE: This is done before the parent initialization so duplicates can\n # be caught by the session with a nice error rather than Graph\n log.debug2(\"[%s] Auto-registering %s\", session.id, self)\n session.add_component(self)\n\n # Initialize the Graph that'll control component rendering\n self.graph = Graph()\n\n # The list of all managed objects owned by this component\n self._managed_objects = None\n\n def __str__(self):\n return f\"Component({self.name})\"\n\n @property\n def managed_objects(self) -> List[ManagedObject]:\n \"\"\"The list of managed objects that this Component currently knows\n about. If called before rending, this will be an empty list, so it will\n always be iterable.\n\n Returns:\n managed_objects: List[ManagedObject]\n The list of known managed objects\n \"\"\"\n return self._managed_objects or []\n\n ## Base Class Interface ####################################################\n #\n # These methods MAY be implemented by children, but contain default\n # implementations that are appropriate for simple cases.\n #\n # NOTE: We liberally use pylint disables here to make the base interface\n # clear to deriving classes.\n ##\n\n def build_chart(self, session: Session): # pylint: disable=unused-argument\n \"\"\"The build_chart function allows the derived class to add child Charts\n lazily so that they can take advantage of post-initialization\n information.\n\n Args:\n session: Session\n The current deploy session\n \"\"\"\n\n def verify(self, session):\n \"\"\"The verify function will run any necessary testing and validation\n that the component needs to ensure that rollout was successfully\n completed.\n\n Args:\n session: Session\n The current reconciliation session\n\n Returns:\n success: bool\n True on successful deployment verification, False on failure\n conditions\n \"\"\"\n return self._default_verify(session, is_subsystem=False)\n\n @alog.logged_function(log.debug2)\n @alog.timed_function(log.debug2)\n def render_chart(self, session):\n \"\"\"This will be invoked by the parent Application to build and render\n the individual component's chart\n\n Args:\n session: Session\n The session for this reconciliation\n \"\"\"\n\n # Do the rendering\n self.__render(session)\n\n # If a working directory is configured, use it\n if config.working_dir:\n rendered_file = self.to_file(session)\n log.debug(\"Rendered %s to %s\", self, rendered_file)\n\n def update_object_definition(\n self,\n session: Session, # pylint: disable=unused-argument\n internal_name: str, # pylint: 
disable=unused-argument\n resource_definition: dict,\n ):\n \"\"\"Allow children to inject arbitrary object mutation logic for\n individual managed objects\n\n The base implementation simply returns the given definition as a\n passthrough\n\n Args:\n session: Session\n The session for this reconciliation\n internal_name: str\n The internal name of the object to update\n resource_definition: dict\n The dict representation of the resource to modify\n\n Returns:\n resource_definition: dict\n The dict representation of the resource with any modifications\n applied\n \"\"\"\n return resource_definition\n\n @alog.logged_function(log.debug2)\n @alog.timed_function(log.debug2)\n def deploy(self, session):\n \"\"\"Deploy the component\n\n Args:\n session: Session\n The current reconciliation session\n\n Returns:\n success: bool\n True on successful application of the kub state (not\n programmatic verification), False otherwise\n \"\"\"\n assert (\n self._managed_objects is not None\n ), \"Cannot call deploy() before render_chart()\"\n\n # Deploy all managed objects\n success, _ = session.deploy_manager.deploy(\n [obj.definition for obj in self._managed_objects]\n )\n if not success:\n log.warning(\"Failed to deploy [%s]\", self)\n return False\n return True\n\n def disable(self, session):\n \"\"\"Disable the component\n\n Args:\n session: Session\n The current reconciliation session\n\n Returns:\n success: bool\n True on successful application of the kub state (not\n programmatic verification), False otherwise\n \"\"\"\n assert (\n self._managed_objects is not None\n ), \"Cannot call disable() before render_chart()\"\n\n # Disable all managed objects\n success, _ = session.deploy_manager.disable(\n [obj.definition for obj in self._managed_objects]\n )\n if not success:\n log.warning(\"Failed to disable [%s]\", self)\n return False\n return True\n\n ## Resource Interface ####################################################\n #\n # These methods offer functionality that children can use to add resources to\n # a components graph\n ##\n\n def add_resource(\n self,\n name: str, # pylint: disable=redefined-builtin\n obj: Any,\n ) -> Optional[\n ResourceNode\n ]: # pylint: disable=unused-argument, redefined-builtin, invalid-name\n \"\"\"The add_resource function allows the derived class to add resources\n to this component to later be rendered\n\n Args:\n name: str\n The name of the resource in the Graph\n obj: Any\n An object or dict which can be manipulated into a dict\n representation of the kubernetes resource\n \"\"\"\n # Sanitize object to enable native support for openapi objects\n obj = sanitize_for_serialization(obj)\n\n # Add namespace to obj if not present\n obj.setdefault(\"metadata\", {}).setdefault(\"namespace\", self.session_namespace)\n\n node = ResourceNode(name, obj)\n self.graph.add_node(node)\n return node\n\n def add_dependency(\n self,\n session: Session,\n *components: \"Component\",\n verify_function: Optional[VERIFY_FUNCTION] = None,\n ):\n \"\"\"This add_dependency function sets up a dependency between this component\n and a list of other components. To add a dependency between resources inside\n this component use resource.add_dependency\n Args:\n session: Session\n The current resource session\n *components: Components\n Any number of components to be added as a dependency\n verify_function: Optional[verify_function]\n An Optional callable function of the form `def verify(session) -> bool:`\n to use to verify that the dependency has been satisfied. 
This\n will be used to block deployment of the component beyond\n requiring that the upstream has been deployed successfully.\n \"\"\"\n for component in components:\n session.add_component_dependency(self, component, verify_function)\n\n ## Base Class Utilities ####################################################\n #\n # These methods offer shared functionality that children can (and should)\n # use in their implementations\n ##\n\n @alog.logged_function(log.debug2)\n def to_dict(self, session):\n \"\"\"\n Render the component and return it as a Dictionary, mainly useful for testing\n :return: Dictionary of the rendered component\n \"\"\"\n self.__render(session)\n return [obj.definition for obj in self.managed_objects]\n\n def to_config(self, session):\n \"\"\"\n Render the component and return it as an AttrDict, mainly useful for testing\n :return: AttrDict of the rendered component\n \"\"\"\n\n return [\n aconfig.Config(obj, override_env_vars=False)\n for obj in self.to_dict(session)\n ]\n\n def to_file(self, session):\n \"\"\"\n Render the component to disk and return the rendered file path\n :return: str path to rendered file\n \"\"\"\n assert config.working_dir is not None, \"Config must have a working_dir set\"\n\n # If disabled and not dumping disabled components, nothing to do\n if self.disabled and not config.dump_disabled:\n log.debug(\"Not dumping disabled component: %s\", self)\n return None\n\n # Get the in-memory representation\n objects = self.to_dict(session)\n\n # Get the output file name and make sure the directory structure exists\n path_parts = [\n config.working_dir,\n \".\".join([session.api_version.replace(\"/\", \".\"), session.kind]).lower(),\n session.name,\n ]\n if self.disabled:\n path_parts.append(\"DISABLED\")\n path_parts.append(self.name)\n output_dir = os.path.join(*path_parts)\n if not os.path.exists(output_dir):\n log.debug2(\"Creating output dir: %s\", output_dir)\n os.makedirs(output_dir)\n\n # Serialize to a yaml file\n instance_name = session.name\n output_file = os.path.join(output_dir, f\"{instance_name}-{self.name}.k8s.yaml\")\n log.debug2(\"Saving %s to %s\", self, output_file)\n with open(output_file, \"w\", encoding=\"utf-8\") as outfile:\n outfile.write(\"---\\n\" + yaml.safe_dump_all(objects))\n\n return output_file\n\n ## Base Class Implementation Details #######################################\n #\n # These methods provide shared functionality to the base class function\n # implementations and should not be used directly by children\n ##\n\n @classmethod\n def get_name(cls): # pylint: disable=arguments-differ\n \"\"\"Override get_name to support class attribute\"\"\"\n return cls.name\n\n def _default_verify(self, session, is_subsystem=False):\n \"\"\"The verify function will run any necessary testing and validation\n that the component needs to ensure that rollout was successfully\n completed.\n\n Args:\n session: Session\n The current reconciliation session\n\n Returns:\n success: bool\n True on successful deployment verification, False on failure\n conditions\n \"\"\"\n log.debug2(\"Using default verification for [%s]\", self)\n\n # If this is in dry run mode, we skip verification since this relies on\n # checking for changes in the cluster which won't ever happen\n if config.dry_run:\n log.debug2(\"No verification to perform in dry_run\")\n return True\n\n # Verify all managed resources\n for resource in self.managed_objects:\n log.debug2(\"Verifying [%s/%s]\", resource.kind, resource.name)\n if not verify_resource(\n 
kind=resource.kind,\n name=resource.name,\n api_version=resource.api_version,\n session=session,\n is_subsystem=is_subsystem,\n namespace=resource.namespace,\n ):\n log.debug(\"[%s/%s] not verified\", resource.kind, resource.name)\n return False\n log.debug(\"All managed resources verified for [%s]\", self)\n return True\n\n @staticmethod\n def _preserve_patch_annotation(session, internal_name, resource_definition):\n \"\"\"This implementation helper checks the current state of the given\n resource and patches the desired state to preserve any temporary patch\n annotations found. This is done so that temporary patches can be applied\n to subsystem CRs managed by a top-level controller.\n \"\"\"\n\n # Get the current state of the object\n kind = resource_definition.get(\"kind\")\n api_version = resource_definition.get(\"apiVersion\")\n metadata = resource_definition.get(\"metadata\", {})\n name = metadata.get(\"name\")\n namespace = metadata.get(\"namespace\")\n assert (\n kind is not None and api_version is not None and name is not None\n ), f\"Resource {internal_name} missing critical metadata!\"\n success, content = session.get_object_current_state(\n kind=kind, name=name, api_version=api_version, namespace=namespace\n )\n assert_cluster(\n success,\n f\"Failed to look for current state for [{kind}/{api_version}/{namespace}/{name}]\",\n )\n\n # Look for existing patch annotations\n if content is not None:\n content_meta = content.get(\"metadata\", {})\n patch_anno = content_meta.get(\"annotations\", {}).get(\n TEMPORARY_PATCHES_ANNOTATION_NAME\n )\n\n # If found, update the resource\n if patch_anno:\n resource_definition.setdefault(\"metadata\", {}).setdefault(\n \"annotations\", {}\n )[TEMPORARY_PATCHES_ANNOTATION_NAME] = patch_anno\n\n # Any time we have metadata changes, we need to include the\n # resourceVersion. 
It can't hurt to do so, so we will just always do\n # it here if found.\n resource_version = content_meta.get(\"resourceVersion\")\n if resource_version is not None:\n resource_definition[\"metadata\"][\"resourceVersion\"] = resource_version\n\n # Make sure any ownerReferences are persisted as well\n owner_refs = content_meta.get(\"ownerReferences\")\n if owner_refs:\n resource_definition[\"metadata\"][\"ownerReferences\"] = owner_refs\n\n return resource_definition\n\n def __build_lazy_charts(self, session):\n \"\"\"Delegate to the child implementation of build_chart for lazy chart\n construction.\n \"\"\"\n self.build_chart(session)\n\n @alog.logged_function(log.debug3)\n def __render(self, session):\n \"\"\"This is the primary implementation for rendering objects into\n self.managed_objects\n \"\"\"\n\n # Short-circuit if already rendered\n if self._managed_objects is not None:\n log.debug2(\n \"%s returning %d pre-rendered objects\", self, len(self._managed_objects)\n )\n return self.managed_objects\n\n # Generate name and dict representation of objects\n resource_list = self.__gather_resources(session)\n\n # Iterate all ApiObject children in dependency order and perform the\n # rendering, including patches and backend modifications.\n self._managed_objects = []\n for name, obj in resource_list:\n # Apply any patches to this object\n log.debug2(\"Applying patches to managed object: %s\", name)\n log.debug4(\"Before Patching: %s\", obj)\n obj = apply_patches(name, obj, session.temporary_patches)\n\n # Make sure any temporary patch annotations that exist already\n # on this resource in the cluster are preserved\n log.debug2(\"Checking for existing subsystem patches on: %s\", name)\n obj = self._preserve_patch_annotation(session, name, obj)\n\n # Add the internal name annotation if enabled\n if config.internal_name_annotation:\n log.debug2(\n \"Adding internal name annotation [%s: %s]\",\n INTERNAL_NAME_ANOTATION_NAME,\n name,\n )\n obj.setdefault(\"metadata\", {}).setdefault(\"annotations\", {})[\n INTERNAL_NAME_ANOTATION_NAME\n ] = name\n\n # Allow children to inject additional modification logic\n log.debug4(\"Before Object Updates: %s\", obj)\n obj = self.update_object_definition(session, name, obj)\n\n # Add the resource to the set managed by the is component\n managed_obj = ManagedObject(obj)\n log.debug2(\"Adding managed object: %s\", managed_obj)\n log.debug4(\"Final Definition: %s\", obj)\n self._managed_objects.append(managed_obj)\n\n return self.managed_objects\n\n def __gather_resources(self, session) -> List[Tuple[str, dict]]:\n \"\"\"This is a helper for __render which handles converting resource objects\n into a list of dictionaries.\n \"\"\"\n # Perform lazy chart creation before finishing rendering\n self.__build_lazy_charts(session)\n\n # Determine the flattened set of ApiObject children.\n log.debug2(\"%s populating managed_objects\", self)\n topology = self.graph.topology()\n log.debug3(\"%s topology has %d elements\", self, len(topology))\n log.debug4([type(obj) for obj in topology])\n children = [node for node in topology if isinstance(node, ResourceNode)]\n log.debug2(\"%s found %d ResourceNode children\", self, len(children))\n\n resource_list = []\n for child in children:\n # Construct the managed object with its internal name\n child_name = \".\".join([self.name, child.get_name()])\n child_obj = child.get_data()\n resource_list.append((child_name, child_obj))\n\n return resource_list"
},
{
"identifier": "Session",
"path": "oper8/session.py",
"snippet": "class Session: # pylint: disable=too-many-instance-attributes,too-many-public-methods\n \"\"\"A session is the core context manager for the state of an in-progress\n reconciliation\n \"\"\"\n\n # We strictly define the set of attributes that a Session can have to\n # disallow arbitrary assignment\n __slots__ = [\n \"__components\",\n \"__component_dependencies\",\n \"__enabled_components\",\n \"__disabled_components\",\n \"__id\",\n \"__cr_manifest\",\n \"__config\",\n \"__temporary_patches\",\n \"__deploy_manager\",\n \"__status\",\n \"__current_version\",\n \"__graph\",\n # _app is retained for backwards compatibility\n \"_app\",\n ]\n\n def __init__( # pylint: disable=too-many-arguments\n self,\n reconciliation_id: str,\n cr_manifest: aconfig.Config,\n config: aconfig.Config,\n deploy_manager: DeployManagerBase,\n temporary_patches: Optional[List[dict]] = None,\n ):\n \"\"\"Construct a session object to hold the state for a reconciliation\n\n Args:\n reconciliation_id: str\n The unique ID for this reconciliation\n cr_manifest: aconfig.Config\n The full value of the CR mainfest that triggered this\n reconciliation\n config: aconfig.Config\n The compiled backend config for this reconciliation\n deploy_manager: DeployManagerBase\n The preconfigured DeployManager in charge of running the actual\n deploy operations for this deployment\n temporary_patches: list(dict)\n List of temporary patch object to apply to resources managed by\n this rollout\n \"\"\"\n\n ##################################################################\n # Private Members: These members will be hidden from client code #\n ##################################################################\n\n # Mapping from component name to Component instance\n self.__graph = Graph()\n\n ###################################################\n # Properties: These properties will be exposed as #\n # @property members to be used by client code #\n ###################################################\n\n self.__id = reconciliation_id\n if not isinstance(cr_manifest, aconfig.Config):\n cr_manifest = aconfig.Config(cr_manifest, override_env_vars=False)\n self._validate_cr(cr_manifest)\n self.__cr_manifest = cr_manifest\n if not isinstance(config, aconfig.Config):\n config = aconfig.Config(config, override_env_vars=False)\n self.__config = config\n self.__temporary_patches = temporary_patches or []\n\n # The deploy manager that will be used to manage interactions with the\n # cluster\n self.__deploy_manager = deploy_manager\n\n # Get the current status and version so that it can be referenced by the\n # Application and Components that use it\n self.__status = self.get_status()\n self.__current_version = get_version(self.status)\n\n ## Properties ##############################################################\n\n @property\n def id(self) -> str: # pylint: disable=invalid-name\n \"\"\"The unique reconciliation ID\"\"\"\n return self.__id\n\n @property\n def cr_manifest(self) -> aconfig.Config:\n \"\"\"The full CR manifest that triggered this reconciliation\"\"\"\n return self.__cr_manifest\n\n @property\n def spec(self) -> aconfig.Config:\n \"\"\"The spec section of the CR manifest\"\"\"\n return self.cr_manifest.get(\"spec\", aconfig.Config({}))\n\n @property\n def version(self) -> str:\n \"\"\"The spec.version for this CR\"\"\"\n return get_manifest_version(self.cr_manifest)\n\n @property\n def metadata(self) -> aconfig.Config:\n \"\"\"The metadata for this CR\"\"\"\n return self.cr_manifest.metadata\n\n @property\n def kind(self) -> 
str:\n \"\"\"The kind of the operand for this CR\"\"\"\n return self.cr_manifest.kind\n\n @property\n def api_version(self) -> str:\n \"\"\"The api version of the operand for this CR\"\"\"\n return self.cr_manifest.apiVersion\n\n @property\n def name(self) -> str:\n \"\"\"The metadata.name for this CR\"\"\"\n return self.metadata.name\n\n @property\n def namespace(self) -> str:\n \"\"\"The metadata.namespace for this CR\"\"\"\n return self.metadata.namespace\n\n @property\n def finalizers(self) -> str:\n \"\"\"The metadata.namespace for this CR\"\"\"\n\n # Manually create finalizer list if it doesn't exist so its\n # editable\n if \"finalizers\" not in self.metadata:\n self.metadata[\"finalizers\"] = []\n\n return self.metadata.get(\"finalizers\")\n\n @property\n def config(self) -> aconfig.Config:\n \"\"\"The backend config for this reconciliation\"\"\"\n return self.__config\n\n @property\n def temporary_patches(self) -> List[aconfig.Config]:\n \"\"\"Ordered list of temporary patches that apply to the operand being\n reconciled\n \"\"\"\n return self.__temporary_patches\n\n @property\n def status(self) -> aconfig.Config:\n \"\"\"The operand status\"\"\"\n return self.__status\n\n @property\n def current_version(self) -> aconfig.Config:\n \"\"\"The most recently reconciled version of the operand\"\"\"\n return self.__current_version\n\n @property\n def deploy_manager(self) -> DeployManagerBase:\n \"\"\"Allow read access to the deploy manager\"\"\"\n return self.__deploy_manager\n\n @property\n def graph(self) -> str: # pylint: disable=invalid-name\n \"\"\"The component graph\"\"\"\n return self.__graph\n\n ## State Management ########################################################\n #\n # These functions are used by derived controllers in their setup_components\n # implementations\n ##\n\n @alog.logged_function(log.debug2)\n def add_component(self, component: COMPONENT_INSTANCE_TYPE):\n \"\"\"Add a component to this deploy associated with a specfic application\n\n Args:\n component: Component\n The component to add to this deploy\n disabled: bool\n Whether or not the component is disabled in this deploy\n \"\"\"\n self.graph.add_node(component)\n\n def add_component_dependency(\n self,\n component: Union[str, COMPONENT_INSTANCE_TYPE],\n upstream_component: Union[str, COMPONENT_INSTANCE_TYPE],\n verify_function: Optional[VERIFY_FUNCTION] = None,\n ):\n \"\"\"Add a dependency indicating that one component requires an upstream\n component to be deployed before it can be deployed.\n\n Args:\n component: str or Component\n The component or name of component in the deploy that must wait for the upstream\n upstream_component: str or Component\n The upstream component or name of upstream that must be deployed before component\n verify_function: callable\n A callable function of the form `def verify(session) -> bool:`\n to use to verify that the dependency has been satisified. 
This\n will be used to block deployment of the component beyond\n requiring that the upstream has been deployed successfully.\n \"\"\"\n # Get component obj if name was provided\n component_node = component\n if isinstance(component, str):\n component_node = self.get_component(component)\n\n upstream_component_node = upstream_component\n if isinstance(upstream_component, str):\n upstream_component_node = self.get_component(upstream_component)\n\n if not component_node or not upstream_component_node:\n raise ValueError(\n f\"Cannot add dependency [{component} -> {upstream_component}]\",\n \" for unknown component(s)\",\n )\n\n if component_node.disabled or upstream_component_node.disabled:\n raise ValueError(\n f\"Cannot add dependency [{component} -> {upstream_component}]\",\n \" for with disabled component(s)\",\n )\n\n # Add session parameter to verify function if one was provided\n if verify_function:\n verify_function = partial(verify_function, self)\n self.graph.add_node_dependency(\n component_node, upstream_component_node, verify_function\n )\n\n ## Utilities ###############################################################\n #\n # These utilities may be used anywhere in client code to perform common\n # operations based on the state of the session.\n ##\n def get_component(\n self, name: str, disabled: Optional[bool] = None\n ) -> Optional[COMPONENT_INSTANCE_TYPE]:\n \"\"\"Get an individual component by name\n\n Args:\n name: str\n Name of component to return\n disabled: Optional[bool]\n Option on wether to return disabled components. If this option is not supplied then\n the referenced component will be returned irregardless whether its disabled\n or enabled\n\n Returns:\n component: Optional[Component]\n The component with the given name or None if component does not exit or does\n not match disabled arg\n \"\"\"\n comp = self.graph.get_node(name)\n\n # Only filter disabled/enabled components if the option was passed in.\n if isinstance(disabled, bool):\n if disabled:\n return comp if comp.disabled else None\n return comp if not comp.disabled else None\n\n return comp\n\n def get_components(self, disabled: bool = False) -> List[COMPONENT_INSTANCE_TYPE]:\n \"\"\"Get all components associated with an application\n\n Args:\n disabled: bool\n Whether to return disabled or enabled components\n\n Returns:\n components: list(Component)\n The list of Component objects associated with the given\n application\n \"\"\"\n assert isinstance(\n disabled, bool\n ), \"Disabled flag must be a bool. You may be using the old function signature!\"\n\n # Get list of all components.\n comp_list = self.graph.get_all_nodes()\n\n # Filter out disabled/enabled components using get_component\n filtered_list = [\n comp for comp in comp_list if self.get_component(comp.get_name(), disabled)\n ]\n\n return filtered_list\n\n def get_component_dependencies(\n self,\n component: Union[str, COMPONENT_INSTANCE_TYPE],\n ) -> List[Tuple[COMPONENT_INSTANCE_TYPE, Optional[VERIFY_FUNCTION]]]:\n \"\"\"Get the list of (upstream_name, verify_function) tuples for a given\n component.\n\n NOTE: This is primarily for use inside of the RolloutManager. 
Do not use\n this method in user code unless you know what you're doing!\n\n Args:\n component_name: str\n The name of the component to lookup dependencies for\n\n Returns:\n upstreams: List[Tuple[str, Optional[VERIFY_FUNCTION]]]\n The list of upstream (name, verify_fn) pairs\n \"\"\"\n component_node = component\n if isinstance(component, str):\n component_node = self.get_component(component)\n\n return component_node.get_children()\n\n def get_scoped_name(self, name: str) -> str:\n \"\"\"Get a name that is scoped to the application instance\n\n Args:\n name: str\n The name of a resource that will be managed by this operator\n which should have instance name scoping applied\n\n Returns:\n scoped_name: str\n The scoped and truncated version of the input name\n \"\"\"\n scoped_name = self.get_truncated_name(f\"{self.name}-{name}\")\n log.debug3(\"Scoped name [%s] -> [%s]\", name, scoped_name)\n return scoped_name\n\n @staticmethod\n def get_truncated_name(name: str) -> str:\n \"\"\"Perform truncation on a cluster name to make it conform to kubernetes\n limits while remaining unique.\n\n Args:\n name: str\n The name of the resource that should be truncated and made\n unique\n\n Returns:\n truncated_name: str\n A version of name that has been truncated and made unique\n \"\"\"\n if len(name) > MAX_NAME_LEN:\n sha = hashlib.sha256()\n sha.update(name.encode(\"utf-8\"))\n trunc_name = name[: MAX_NAME_LEN - 4] + sha.hexdigest()[:4]\n log.debug2(\"Truncated name [%s] -> [%s]\", name, trunc_name)\n name = trunc_name\n return name\n\n def get_object_current_state(\n self,\n kind: str,\n name: str,\n api_version: Optional[str] = None,\n namespace: Optional[str] = _SESSION_NAMESPACE,\n ) -> Tuple[bool, Optional[dict]]:\n \"\"\"Get the current state of the given object in the namespace of this\n session\n\n Args:\n kind: str\n The kind of the object to fetch\n name: str\n The full name of the object to fetch\n api_version: str\n The api_version of the resource kind to fetch\n\n Returns:\n success: bool\n Whether or not the state fetch operation succeeded\n current_state: dict or None\n The dict representation of the current object's configuration,\n or None if not present\n \"\"\"\n namespace = namespace if namespace != _SESSION_NAMESPACE else self.namespace\n return self.deploy_manager.get_object_current_state(\n kind=kind,\n name=name,\n namespace=namespace,\n api_version=api_version,\n )\n\n def filter_objects_current_state( # pylint: disable=too-many-arguments\n self,\n kind: str,\n api_version: Optional[str] = None,\n label_selector: Optional[str] = None,\n field_selector: Optional[str] = None,\n namespace: Optional[str] = _SESSION_NAMESPACE,\n ) -> Tuple[bool, List[dict]]:\n \"\"\"Get the current state of the given object in the namespace of this\n session\n\n Args:\n kind: str\n The kind of the object to fetch\n label_selector: str\n The label selector to filter the results by\n field_selector: str\n The field selector to filter the results by\n api_version: str\n The api_version of the resource kind to fetch\n\n Returns:\n success: bool\n Whether or not the state fetch operation succeeded\n current_state: List[Dict]\n The list of resources in dict representation,\n or [] if none match\n \"\"\"\n namespace = namespace if namespace != _SESSION_NAMESPACE else self.namespace\n return self.deploy_manager.filter_objects_current_state(\n kind=kind,\n namespace=namespace,\n api_version=api_version,\n label_selector=label_selector,\n field_selector=field_selector,\n )\n\n 
@alog.logged_function(log.debug2)\n @alog.timed_function(log.debug2)\n def get_status(self) -> dict:\n \"\"\"Get the status of the resource being managed by this session or an\n empty dict if not available\n\n Returns:\n current_status: dict\n The dict representation of the status subresource for the CR\n being managed by this session\n \"\"\"\n\n # Pull the kind, name, and namespace\n kind = self.cr_manifest.get(\"kind\")\n name = self.name\n api_version = self.api_version\n log.debug3(\"Getting status for %s.%s/%s\", api_version, kind, name)\n\n # Fetch the current status\n success, content = self.get_object_current_state(\n kind=kind,\n name=name,\n api_version=api_version,\n )\n assert_cluster(\n success, f\"Failed to fetch status for [{api_version}/{kind}/{name}]\"\n )\n if content:\n return content.get(\"status\", {})\n return {}\n\n ## Implementation Details ##################################################\n\n @staticmethod\n def _validate_cr(cr_manifest: aconfig.Config):\n \"\"\"Ensure that all expected elements of the CR are present. Expected\n elements are those that are guaranteed to be present by the kube API.\n \"\"\"\n assert \"kind\" in cr_manifest, \"CR missing required section ['kind']\"\n assert \"apiVersion\" in cr_manifest, \"CR missing required section ['apiVersion']\"\n assert \"metadata\" in cr_manifest, \"CR missing required section ['metadata']\"\n assert (\n \"name\" in cr_manifest.metadata\n ), \"CR missing required section ['metadata.name']\"\n assert (\n \"namespace\" in cr_manifest.metadata\n ), \"CR missing required section ['metadata.namespace']\""
},
{
"identifier": "merge_configs",
"path": "oper8/utils.py",
"snippet": "def merge_configs(base, overrides) -> dict:\n \"\"\"Helper to perform a deep merge of the overrides into the base. The merge\n is done in place, but the resulting dict is also returned for convenience.\n\n The merge logic is quite simple: If both the base and overrides have a key\n and the type of the key for both is a dict, recursively merge, otherwise\n set the base value to the override value.\n\n Args:\n base: dict\n The base config that will be updated with the overrides\n overrides: dict\n The override config\n\n Returns:\n merged: dict\n The merged results of overrides merged onto base\n \"\"\"\n for key, value in overrides.items():\n if (\n key not in base\n or not isinstance(base[key], dict)\n or not isinstance(value, dict)\n ):\n base[key] = value\n else:\n base[key] = merge_configs(base[key], value)\n\n return base"
}
] | from typing import List, Tuple, Union
from .constants import DEPS_ANNOTATION
from oper8 import Component, Session
from oper8.utils import merge_configs
import hashlib
import json
import alog | 9,053 | """
This module holds shared functionality for adding dependency annotations to all
resources that need them.
A dependency annotation on a Pod encodes a unique hash of the set of
data-resources that the Pod depends on. For example, if a Pod mounts a Secret
and a ConfigMap, the dependency annotation will hold a unique hash of the data
content of these resources. The role of the dependency annotation is to force a
rollover when upstream data-resources change their content so that the content
is guaranteed to be picked up by the consuming Pod.
"""
# Standard
# First Party
# Local
log = alog.use_channel("DEPS")
## Common Functions ############################################################
@alog.logged_function(log.debug)
def add_deps_annotation(
component: Component,
session: Session,
resource_definition: dict,
) -> dict:
"""Add the dependency hash annotation to any pods found in the given object
Args:
component: Component
The component that this resource belongs to
session: Session
The session for this deploy
resource_definition: dict
The dict representation of the resource to modify
Returns:
resource_definition: dict
The dict representation of the resource with any modifications
applied
"""
resource_name = "{}/{}".format(
resource_definition.get("kind"),
resource_definition.get("metadata", {}).get("name"),
)
# Look for any/all pod annotations
pod = _find_pod(resource_definition)
if pod is not None:
log.debug2("Found Pod for [%s]", resource_name)
log.debug4(pod)
# Traverse through and look for anything that looks like a secret or
# configmap reference
deps_map = _find_pod_data_deps(pod)
log.debug3("Deps Map: %s", deps_map)
if deps_map:
# Go through each dependency and determine if it needs to be fetched
            # or if it's part of the owning component
deps_list = []
for dep_kind, dep_names in deps_map.items():
for dep_name in dep_names:
# Look for this object in the objects managed by this
# component.
#
# NOTE: This will only be the components which have been
# declared earlier in the chart or have explicitly been
# marked as upstreams of this object.
found_in_component = False
for obj in component.managed_objects:
log.debug4("Checking %s/%s", obj.kind, obj.name)
if obj.kind == dep_kind and obj.name == dep_name:
log.debug3(
"Found intra-chart dependency of %s: %s",
resource_name,
obj,
)
deps_list.append(obj.definition)
found_in_component = True
break
# If not found in the component, add it as a lookup
if not found_in_component:
log.debug3(
"Found extra-chart dependency of %s: %s/%s",
resource_name,
dep_kind,
dep_name,
)
deps_list.append((dep_kind, dep_name))
# Add the annotation with the full list
md = pod.setdefault("metadata", {})
annos = md.setdefault("annotations", {})
| """
This module holds shared functionality for adding dependency annotations to all
resources that need them.
A dependency annotation on a Pod encodes a unique hash of the set of
data-resources that the Pod depends on. For example, if a Pod mounts a Secret
and a ConfigMap, the dependency annotation will hold a unique hash of the data
content of these resources. The role of the dependency annotation is to force a
rollover when upstream data-resources change their content so that the content
is guaranteed to be picked up by the consuming Pod.
"""
# Standard
# First Party
# Local
log = alog.use_channel("DEPS")
## Common Functions ############################################################
@alog.logged_function(log.debug)
def add_deps_annotation(
component: Component,
session: Session,
resource_definition: dict,
) -> dict:
"""Add the dependency hash annotation to any pods found in the given object
Args:
component: Component
The component that this resource belongs to
session: Session
The session for this deploy
resource_definition: dict
The dict representation of the resource to modify
Returns:
resource_definition: dict
The dict representation of the resource with any modifications
applied
"""
resource_name = "{}/{}".format(
resource_definition.get("kind"),
resource_definition.get("metadata", {}).get("name"),
)
# Look for any/all pod annotations
pod = _find_pod(resource_definition)
if pod is not None:
log.debug2("Found Pod for [%s]", resource_name)
log.debug4(pod)
# Traverse through and look for anything that looks like a secret or
# configmap reference
deps_map = _find_pod_data_deps(pod)
log.debug3("Deps Map: %s", deps_map)
if deps_map:
# Go through each dependency and determine if it needs to be fetched
            # or if it's part of the owning component
deps_list = []
for dep_kind, dep_names in deps_map.items():
for dep_name in dep_names:
# Look for this object in the objects managed by this
# component.
#
# NOTE: This will only be the components which have been
# declared earlier in the chart or have explicitly been
# marked as upstreams of this object.
found_in_component = False
for obj in component.managed_objects:
log.debug4("Checking %s/%s", obj.kind, obj.name)
if obj.kind == dep_kind and obj.name == dep_name:
log.debug3(
"Found intra-chart dependency of %s: %s",
resource_name,
obj,
)
deps_list.append(obj.definition)
found_in_component = True
break
# If not found in the component, add it as a lookup
if not found_in_component:
log.debug3(
"Found extra-chart dependency of %s: %s/%s",
resource_name,
dep_kind,
dep_name,
)
deps_list.append((dep_kind, dep_name))
# Add the annotation with the full list
md = pod.setdefault("metadata", {})
annos = md.setdefault("annotations", {}) | md["annotations"] = merge_configs( | 3 | 2023-11-15 16:43:29+00:00 | 12k |
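The record above ends right where the dependency list gets merged into the Pod's annotations (the target completion is `md["annotations"] = merge_configs(`). As a rough, self-contained sketch of the two ingredients involved — the deep merge from `oper8.utils.merge_configs` quoted in the context above and the content hash described in the module docstring — the following is illustrative only: the `hash_deps` helper and the annotation key are assumptions, not the module's real implementation.

import hashlib
import json

def merge_configs(base, overrides) -> dict:
    # Same deep-merge logic as oper8.utils.merge_configs (quoted in the context above):
    # recurse when both sides are dicts, otherwise the override value wins.
    for key, value in overrides.items():
        if key not in base or not isinstance(base[key], dict) or not isinstance(value, dict):
            base[key] = value
        else:
            base[key] = merge_configs(base[key], value)
    return base

def hash_deps(deps_list) -> str:
    # Hypothetical helper: a stable hash over the serialized dependency content, so the
    # annotation changes whenever an upstream Secret/ConfigMap changes its data.
    payload = json.dumps(deps_list, sort_keys=True, default=str).encode("utf-8")
    return hashlib.sha256(payload).hexdigest()

pod = {"metadata": {"annotations": {"existing": "annotation"}}}
deps_list = [{"kind": "ConfigMap", "metadata": {"name": "app-config"}, "data": {"key": "value"}}]
annos = pod["metadata"]["annotations"]
pod["metadata"]["annotations"] = merge_configs(annos, {"example.org/deps-hash": hash_deps(deps_list)})
print(pod["metadata"]["annotations"])  # existing annotation preserved, hash key added

Merging (rather than plainly assigning) matters here because the Pod template may already carry user-supplied annotations that must be preserved.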
smrfeld/tsmixer-pytorch | main.py | [
{
"identifier": "plot_preds",
"path": "utils/plotting.py",
"snippet": "def plot_preds(preds: List[List[float]], preds_gt: List[List[float]], no_feats_plot: int, fname_save: Optional[str] = None, inputs: Optional[List[List[float]]] = None, show: bool = True):\n \"\"\"Plot predictions\n\n Args:\n preds (List[List[float]]): Predictions of shape (no_samples, no_feats)\n preds_gt (List[List[float]]): Predictions of shape (no_samples, no_feats)\n no_feats_plot (int): Number of features to plot\n fname_save (Optional[str], optional): File name to save the plot. Defaults to None.\n inputs (Optional[List[List[float]]], optional): Input of shape (no_samples, no_feats)\n show (bool): Show the plot\n \"\"\" \n import plotly.graph_objects as go\n from plotly.subplots import make_subplots\n\n no_feats = len(preds[0])\n if no_feats_plot > no_feats:\n logger.warning(f\"no_feats_plot ({no_feats_plot}) is larger than no_feats ({no_feats}). Setting no_feats_plot to no_feats\")\n no_feats_plot = no_feats\n\n no_cols = 3\n no_rows = int(no_feats_plot / no_cols)\n if no_feats_plot % no_cols != 0:\n no_rows += 1\n\n fig = make_subplots(rows=no_rows, cols=no_cols, subplot_titles=[f\"Feature {ifeat}\" for ifeat in range(no_feats_plot)])\n\n no_inputs = len(inputs) if inputs is not None else 0\n x_preds = list(range(no_inputs, no_inputs + len(preds)))\n for ifeat in range(no_feats_plot):\n row = int(ifeat / no_cols) + 1\n col = (ifeat % no_cols) + 1\n\n if inputs is not None:\n x_inputs = list(range(len(inputs)))\n fig.add_trace(go.Scatter(x=x_inputs, y=[in_y[ifeat] for in_y in inputs], mode=\"lines\", name=f\"Inputs\", line=dict(color=\"black\"), showlegend=ifeat==0), row=row, col=col)\n\n fig.add_trace(go.Scatter(x=x_preds, y=[pred[ifeat] for pred in preds_gt], mode=\"lines\", name=f\"Ground truth\", line=dict(color=\"red\"), showlegend=ifeat==0), row=row, col=col)\n fig.add_trace(go.Scatter(x=x_preds, y=[pred[ifeat] for pred in preds], mode=\"lines\", name=f\"Model\", line=dict(color=\"blue\"), showlegend=ifeat==0), row=row, col=col)\n\n fig.update_layout(\n height=300*no_rows, \n width=400*no_cols, \n title_text=\"Predictions\",\n font=dict(size=18),\n xaxis_title_text=\"Time\",\n yaxis_title_text=\"Signal\",\n )\n\n if fname_save is not None:\n fig.write_image(fname_save)\n logger.info(f\"Saved plot to {fname_save}\")\n\n if show:\n fig.show()\n\n return fig"
},
{
"identifier": "plot_loss",
"path": "utils/plotting.py",
"snippet": "def plot_loss(train_data: TrainingMetadata, fname_save: Optional[str] = None, show: bool = True):\n \"\"\"Plot loss\n\n Args:\n train_data (TSMixer.TrainingMetadata): Training metadata\n fname_save (Optional[str], optional): File name to save the plot. Defaults to None.\n show (bool): Show the plot\n \"\"\" \n import plotly.graph_objects as go\n\n fig = go.Figure()\n x = [ epoch for epoch in train_data.epoch_to_data.keys() ]\n y = [ data.val_loss for data in train_data.epoch_to_data.values() ]\n fig.add_trace(go.Scatter(x=x, y=y, mode=\"lines\", name=\"Val. loss\"))\n y = [ data.train_loss for data in train_data.epoch_to_data.values() ]\n fig.add_trace(go.Scatter(x=x, y=y, mode=\"lines\", name=\"Train loss\"))\n\n fig.update_layout(\n height=500, \n width=700, \n title_text=\"Loss during training\",\n xaxis_title_text=\"Epoch\",\n yaxis_title_text=\"Loss\",\n font=dict(size=18),\n )\n\n if fname_save is not None:\n fig.write_image(fname_save)\n logger.info(f\"Saved plot to {fname_save}\")\n\n if show:\n fig.show()\n\n return fig"
},
{
"identifier": "TSMixerConf",
"path": "utils/tsmixer_conf.py",
"snippet": "class TSMixerConf(DataClassDictMixin):\n\n class Initialize(Enum):\n FROM_LATEST_CHECKPOINT = \"from-latest-checkpoint\"\n \"Load the model from the latest checkpoint\"\n\n FROM_BEST_CHECKPOINT = \"from-best-checkpoint\"\n \"Load the model from the best checkpoint\"\n\n FROM_SCRATCH = \"from-scratch\"\n \"Initialize the model from scratch\"\n\n class DataSrc(Enum):\n\n CSV_FILE = \"csv-file\"\n \"Load the dataset from a CSV file\"\n\n class ValidationSplit(Enum):\n \n TEMPORAL_HOLDOUT = \"temporal-holdout\"\n \"Reserve the last portion (e.g., 10-20%) of your time-ordered data for validation, and use the remaining data for training. This is a simple and widely used approach.\"\n\n output_dir: str\n \"Directory where to save checkpoints and generated images\"\n\n input_length: int\n \"Number of time steps to use as input\"\n\n no_features: int\n \"Number of features in the dataset\"\n\n no_mixer_layers: int\n \"Number of mixer layers\"\n\n prediction_length: int\n \"Number of time steps to predict\"\n\n data_src: DataSrc\n \"Where to load the dataset from\"\n\n device: str = \"mps\"\n \"Device to use for training\"\n\n data_src_csv: Optional[str] = None\n \"Path to the CSV file to load the dataset from. Only used if data_src is CSV_FILE\"\n\n batch_size: int = 64\n \"Batch size\"\n\n shuffle: bool = True\n \"Shuffle the data\"\n\n num_epochs: int = 10\n \"Number of epochs to train for\"\n\n learning_rate: float = 0.001\n \"Learning rate\"\n\n optimizer: str = \"Adam\"\n \"Optimizer to use\"\n\n random_seed: int = 42\n \"Random seed for reproducibility\"\n\n validation_split: ValidationSplit = ValidationSplit.TEMPORAL_HOLDOUT\n \"How to split the data into training and validation\"\n\n validation_split_holdout: float = 0.2\n \"Use the last X% of the data for validation. Only used for TEMPORAL_HOLDOUT\"\n\n initialize: Initialize = Initialize.FROM_SCRATCH\n \"How to initialize the model\"\n\n dropout: float = 0.5\n \"Dropout\"\n\n feat_mixing_hidden_channels: Optional[int] = None\n \"Number of hidden channels in the feature mixing MLP. If None, uses same as input features.\"\n\n early_stopping_patience: Optional[int] = 5\n \"Early stopping patience. If the validation loss does not improve over this many epochs, stop early. 
If None, no early stopping is used.\"\n\n @property\n def image_dir(self):\n makedirs(self.output_dir)\n makedirs(os.path.join(self.output_dir, \"images\"))\n return os.path.join(self.output_dir, \"images\")\n\n @property\n def checkpoint_init(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"init.pth\")\n\n @property\n def checkpoint_best(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"best.pth\")\n\n @property\n def checkpoint_latest(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"latest.pth\")\n\n @property\n def train_progress_json(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"loss.json\")\n\n @property\n def pred_val_dataset_json(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"pred_val_dataset.json\")\n\n @property\n def data_norm_json(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"data_norm.json\")\n\n def check_valid(self):\n assert 0 <= self.validation_split_holdout <= 1, \"validation_split_holdout must be between 0 and 1\"\n\n # Check device exists\n import torch\n assert self.device in [\"cpu\", \"cuda\", \"cuda:0\", \"cuda:1\", \"cuda:2\", \"cuda:3\", \"mps\"], f\"Device {self.device} not supported\"\n if self.device == \"cuda\":\n assert torch.cuda.is_available(), \"CUDA is not available\"\n assert torch.cuda.device_count() > 1, \"Must have more than one CUDA device to use MPS\"\n elif self.device == \"mps\":\n assert torch.backends.mps.is_available(), \"MPS is not available\"\n \n\n def load_training_metadata_or_new(self, epoch_start: Optional[int] = None) -> \"TrainingMetadata\":\n \"\"\"Load the training progress from a JSON file, or create a new one\n\n Args:\n epoch_start (Optional[int], optional): Starting epoch - earlier epochs will be removed if not None. Defaults to None.\n\n Returns:\n TrainProgress: Training metadata\n \"\"\" \n if os.path.exists(self.train_progress_json):\n with open(self.train_progress_json, \"r\") as f:\n tp = TrainingMetadata.from_dict(json.load(f))\n\n # Remove epochs after epoch_start\n if epoch_start is not None:\n tp.epoch_to_data = { epoch: tp.epoch_to_data[epoch] for epoch in tp.epoch_to_data if epoch < epoch_start }\n \n return tp\n else:\n return TrainingMetadata(epoch_to_data={})\n\n\n def write_data_norm(self, data_norm: DataNormalization):\n \"\"\"Write the data normalization to a JSON file\n\n Args:\n data_norm (DataNormalization): Data normalization\n \"\"\" \n with open(self.data_norm_json, \"w\") as f:\n json.dump(data_norm.to_dict(), f, indent=3)\n logger.debug(f\"Saved data normalization to {f.name}\")\n\n\n def write_training_metadata(self, train_data: \"TrainingMetadata\"):\n \"\"\"Write the training progress to a JSON file\n\n Args:\n train_data (TrainingMetadata): _description_\n \"\"\" \n if os.path.dirname(self.train_progress_json) != \"\":\n makedirs(os.path.dirname(self.train_progress_json))\n with open(self.train_progress_json, \"w\") as f:\n json.dump(train_data.to_dict(), f, indent=3)\n\n\n def create_data_loaders_train_val(self, data_norm: Optional[DataNormalization] = None) -> Tuple[DataLoader, DataLoader, DataNormalization]:\n \"\"\"Create the training and validation data loaders\n\n Args:\n data_norm (Optional[DataNormalization], optional): Data normalization to use, otherwise will be calculated. 
Defaults to None.\n\n Returns:\n Tuple[DataLoader, DataLoader, DataNormalization]: Training and validation data loaders\n \"\"\" \n\n if self.data_src == self.DataSrc.CSV_FILE:\n assert self.data_src_csv is not None, \"data_src_csv must be set if data_src is CSV_FILE\"\n\n from .load_csv import load_csv_dataset, ValidationSplit\n return load_csv_dataset(\n csv_file=self.data_src_csv,\n batch_size=self.batch_size,\n input_length=self.input_length,\n prediction_length=self.prediction_length,\n val_split=ValidationSplit(self.validation_split.value),\n val_split_holdout=self.validation_split_holdout,\n shuffle=self.shuffle,\n data_norm_exist=data_norm\n )\n else:\n raise NotImplementedError(f\"data_src {self.data_src} not implemented\")"
},
{
"identifier": "TSMixerGridSearch",
"path": "utils/tsmixer_grid_search_conf.py",
"snippet": "class TSMixerGridSearch(DataClassDictMixin):\n \"\"\"Configuration for grid search\n \"\"\" \n\n @dataclass\n class ParamRange(DataClassDictMixin):\n \n learning_rates: List[float]\n \"Learning rates\"\n\n no_mixer_layers: List[int]\n \"Number of mixer layers\"\n\n dropouts: List[float]\n \"Dropout\"\n\n input_lengths: List[int]\n \"Number of time steps to use as input\"\n\n prediction_lengths: List[int]\n \"Number of time steps to predict\"\n\n feat_mixing_hidden_channels: List[Optional[int]] = field(default_factory=lambda: [None])\n \"Number of hidden channels in the feature mixing MLP. If None, uses same as input features.\"\n\n batch_sizes: List[int] = field(default_factory=lambda: [64])\n \"Batch size\"\n\n num_epochs: List[int] = field(default_factory=lambda: [100])\n \"Number of epochs to train for\"\n\n optimizers: List[str] = field(default_factory=lambda: [\"Adam\"])\n \"Optimizer to use\"\n\n @property\n def options_str(self) -> str:\n s = []\n s.append((\"lr\",str(self.learning_rates)))\n s.append((\"nmix\",str(self.no_mixer_layers)))\n s.append((\"drop\",str(self.dropouts)))\n s.append((\"in\",str(self.input_lengths)))\n s.append((\"pred\",str(self.prediction_lengths)))\n s.append((\"hidden\",str(self.feat_mixing_hidden_channels)))\n s.append((\"batch\",str(self.batch_sizes)))\n s.append((\"epochs\",str(self.num_epochs)))\n s.append((\"opt\",str(self.optimizers)))\n\n # Sort by key\n s = sorted(s, key=lambda x: x[0])\n\n return \"_\".join([f\"{k}{v}\" for k,v in s])\n\n param_ranges: List[ParamRange]\n \"Any number of parameter ranges to try\"\n\n output_dir: str\n \"Output directory\"\n\n no_features: int\n \"Number of features in the dataset\"\n\n data_src: TSMixerConf.DataSrc = TSMixerConf.DataSrc.CSV_FILE\n \"Where to load the dataset from\"\n\n data_src_csv: Optional[str] = None\n \"Path to the CSV file to load the dataset from. Only used if data_src is CSV_FILE\"\n\n def iterate(self) -> Iterator[TSMixerConf]:\n \"\"\"Iterate over all configurations\n\n Yields:\n Iterator[TSMixerConf]: Configuration for a single run\n \"\"\" \n for idx,param_range in enumerate(self.param_ranges):\n logger.info(\"===========================================\")\n logger.info(f\"Grid search iteration {idx+1}/{len(self.param_ranges)}\")\n logger.info(\"===========================================\")\n\n for learning_rate in param_range.learning_rates:\n for no_mixer_layers in param_range.no_mixer_layers:\n for dropout in param_range.dropouts:\n for feat_mixing_hidden_channels in param_range.feat_mixing_hidden_channels:\n for input_length in param_range.input_lengths:\n for prediction_length in param_range.prediction_lengths:\n for batch_size in param_range.batch_sizes:\n for num_epochs in param_range.num_epochs:\n for optimizer in param_range.optimizers:\n # Output subdir\n output_dir = os.path.join(self.output_dir, param_range.options_str)\n conf = TSMixerConf(\n input_length=input_length,\n prediction_length=prediction_length,\n no_features=self.no_features,\n no_mixer_layers=no_mixer_layers,\n output_dir=output_dir,\n data_src=self.data_src,\n data_src_csv=self.data_src_csv,\n batch_size=batch_size,\n num_epochs=num_epochs,\n learning_rate=learning_rate,\n optimizer=optimizer,\n dropout=dropout,\n feat_mixing_hidden_channels=feat_mixing_hidden_channels\n )\n logger.info(f\"TSMixer config: {conf}\")\n logger.info(f\"Output sub-dir: {output_dir}\")\n yield conf"
},
{
"identifier": "TSMixer",
"path": "utils/tsmixer.py",
"snippet": "class TSMixer:\n \"\"\"TSMixer including training and prediction methods\n \"\"\" \n\n\n def __init__(self, conf: TSMixerConf):\n \"\"\"Constructor for TSMixer class\n\n Args:\n conf (TSMixerConf): Configuration\n \"\"\" \n conf.check_valid()\n self.conf = conf\n\n # Create the model\n self.model = TSMixerModel(\n input_length=self.conf.input_length,\n forecast_length=self.conf.prediction_length,\n no_feats=self.conf.no_features,\n feat_mixing_hidden_channels=self.conf.feat_mixing_hidden_channels or self.conf.no_features,\n no_mixer_layers=self.conf.no_mixer_layers,\n dropout=self.conf.dropout\n )\n\n # Move to device\n self.model.to(self.conf.device)\n\n # Load the model\n if self.conf.initialize == self.conf.Initialize.FROM_LATEST_CHECKPOINT:\n self.load_checkpoint(fname=self.conf.checkpoint_latest)\n elif self.conf.initialize == self.conf.Initialize.FROM_BEST_CHECKPOINT:\n self.load_checkpoint(fname=self.conf.checkpoint_best)\n elif self.conf.initialize == self.conf.Initialize.FROM_SCRATCH:\n pass\n else:\n raise NotImplementedError(f\"Initialize {self.conf.initialize} not implemented\")\n\n\n def load_checkpoint(self, fname: str, optimizer: Optional[torch.optim.Optimizer] = None) -> Tuple[int,float]:\n \"\"\"Load a checkpoint, optionally including the optimizer state\n\n Args:\n fname (str): File name\n optimizer (Optional[torch.optim.Optimizer], optional): Optimizer to update from checkpoint. Defaults to None.\n\n Returns:\n Tuple[int,float]: Epoch and loss\n \"\"\" \n logger.debug(f\"Loading model weights from {fname}\")\n checkpoint = torch.load(fname)\n self.model.load_state_dict(checkpoint['model_state_dict'])\n\n if optimizer is not None:\n logger.debug(f\"Loading optimizer state from {fname}\")\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n epoch = checkpoint['epoch']\n loss = checkpoint['loss']\n logger.info(f\"Loaded optimizer state from epoch {epoch} with loss {loss}\")\n return epoch, loss\n\n\n def predict(self, batch_input: torch.Tensor) -> torch.Tensor:\n \"\"\"Predict the output for a batch of input data\n\n Args:\n batch_input (torch.Tensor): Input data of shape (batch_size, input_length (time), no_features)\n\n Returns:\n torch.Tensor: Predicted output of shape (batch_size, prediction_length (time), no_features)\n \"\"\" \n self.model.eval()\n\n # Check size\n assert batch_input.shape[1] == self.conf.input_length, f\"Input length {batch_input.shape[1]} does not match configuration {self.conf.input_length}\"\n assert batch_input.shape[2] == self.conf.no_features, f\"Number of features {batch_input.shape[2]} does not match configuration {self.conf.no_features}\"\n\n # Predict\n batch_input = batch_input.to(self.conf.device)\n with torch.no_grad():\n batch_pred_hat = self.model(batch_input)\n return batch_pred_hat\n\n\n def load_data_norm(self) -> Optional[DataNormalization]:\n \"\"\"Load the data normalization from a JSON file\n\n Returns:\n Optional[DataNormalization]: Data normalization, or None if the file does not exist\n \"\"\" \n\n if os.path.exists(self.conf.data_norm_json):\n logger.debug(f\"Loading data normalization from {self.conf.data_norm_json}\")\n with open(self.conf.data_norm_json, \"r\") as f:\n return DataNormalization.from_dict(json.load(f))\n else:\n return None\n\n\n @dataclass\n class PredData(DataClassDictMixin):\n \"\"\"Prediction data\n \"\"\" \n\n pred_gt: List[List[float]]\n \"Ground truth prediction\"\n\n pred: List[List[float]]\n \"Model prediction\"\n\n inputs: Optional[List[List[float]]] = None\n 
\"Inputs\"\n\n\n def predict_val_dataset(self, max_samples: Optional[int] = None, save_inputs: bool = False) -> List[PredData]:\n \"\"\"Predict on the validation dataset\n\n Args:\n max_samples (Optional[int], optional): Maximum number of samples to predict from the validation dataset. Defaults to None.\n save_inputs (bool, optional): Save the inputs as well as the predictions. Defaults to False.\n\n Returns:\n List[PredData]: List of predictions\n \"\"\" \n\n # Change batch size to 1 and not shuffle data for consistency\n batch_size_save = self.conf.batch_size\n shuffle_save = self.conf.shuffle\n self.conf.batch_size = 1\n self.conf.shuffle = False\n\n # Load the data normalization if it exists and use it\n data_norm = self.load_data_norm()\n\n # Create the loaders\n _, loader_val, _ = self.conf.create_data_loaders_train_val(data_norm)\n \n # Predict\n data_list: List[TSMixer.PredData] = []\n for _ in tqdm(range(max_samples or len(loader_val)), desc=\"Predicting\"):\n batch_input, batch_pred = next(iter(loader_val))\n batch_pred_hat = self.predict(batch_input)\n data = TSMixer.PredData(\n pred_gt=batch_pred.tolist()[0],\n pred=batch_pred_hat.tolist()[0],\n inputs=batch_input.tolist()[0] if save_inputs else None\n )\n data_list.append(data) \n\n # Save data to json\n with open(self.conf.pred_val_dataset_json, \"w\") as f:\n json.dump([ d.to_dict() for d in data_list ], f)\n logger.info(f\"Saved data to {f.name}\")\n\n # Reset options\n self.conf.batch_size = batch_size_save\n self.conf.shuffle = shuffle_save\n\n return data_list\n\n\n def train(self):\n \"\"\"Train the model\n \"\"\" \n\n # Create the optimizer\n optimizer_cls = getattr(torch.optim, self.conf.optimizer)\n optimizer = optimizer_cls(self.model.parameters(), lr=self.conf.learning_rate)\n\n # Load if needed\n if self.conf.initialize == self.conf.Initialize.FROM_LATEST_CHECKPOINT:\n epoch_start, val_loss_best = self.load_checkpoint(fname=self.conf.checkpoint_latest, optimizer=optimizer)\n data_norm = self.load_data_norm()\n elif self.conf.initialize == self.conf.Initialize.FROM_BEST_CHECKPOINT:\n epoch_start, val_loss_best = self.load_checkpoint(fname=self.conf.checkpoint_best, optimizer=optimizer)\n data_norm = self.load_data_norm()\n elif self.conf.initialize == self.conf.Initialize.FROM_SCRATCH:\n epoch_start, val_loss_best = 0, float(\"inf\")\n\n # Clear the output directory\n if os.path.exists(self.conf.output_dir):\n logger.warning(f\"Output directory {self.conf.output_dir} already exists. Deleting it to start over. 
You have 8 seconds.\")\n for _ in range(8):\n print(\".\", end=\"\", flush=True)\n time.sleep(1)\n print(\"\")\n shutil.rmtree(self.conf.output_dir)\n makedirs(self.conf.output_dir)\n\n # Save initial weights\n self._save_checkpoint(epoch=epoch_start, optimizer=optimizer, loss=val_loss_best, fname=self.conf.checkpoint_init)\n data_norm = None\n\n # Copy the config to the output directory for reference\n fname_conf = os.path.join(self.conf.output_dir, \"conf.yml\")\n makedirs(self.conf.output_dir)\n with open(fname_conf, \"w\") as f:\n yaml.dump(self.conf.to_dict(), f, indent=3)\n logger.info(f\"Saved configuration to {f.name}\")\n \n else:\n raise NotImplementedError(f\"Initialize {self.conf.initialize} not implemented\")\n train_data = self.conf.load_training_metadata_or_new(epoch_start)\n\n # Create the loaders\n loader_train, loader_val, data_norm = self.conf.create_data_loaders_train_val(data_norm)\n\n # Write data normalization\n self.conf.write_data_norm(data_norm)\n\n # Train\n epoch_last_improvement = None\n for epoch in range(epoch_start, self.conf.num_epochs):\n logger.info(f\"Epoch {epoch+1}/{self.conf.num_epochs}\")\n t0 = time.time()\n\n # Training\n train_loss = 0\n for batch_input, batch_pred in tqdm(loader_train, desc=\"Training batches\"):\n batch_input, batch_pred = batch_input.to(self.conf.device), batch_pred.to(self.conf.device)\n train_loss += self._train_step(batch_input, batch_pred, optimizer)\n\n # Validation loss\n self.model.eval()\n with torch.no_grad():\n val_loss = 0\n for batch_input, batch_pred in tqdm(loader_val, desc=\"Validation batches\"):\n batch_input, batch_pred = batch_input.to(self.conf.device), batch_pred.to(self.conf.device)\n val_loss += self._compute_loss(batch_input, batch_pred).item()\n\n # Log\n train_loss /= len(loader_train)\n val_loss /= len(loader_val)\n dur = time.time() - t0\n logger.info(f\"Training loss: {train_loss:.5f} val: {val_loss:.5f} duration: {dur:.2f}s\")\n\n # Store metadata about training\n train_data.epoch_to_data[epoch] = TrainingMetadata.EpochData(epoch=epoch, train_loss=train_loss, val_loss=val_loss, duration_seconds=dur)\n\n # Save checkpoint\n if val_loss < val_loss_best:\n logger.info(f\"New best validation loss: {val_loss:.5f}\")\n self._save_checkpoint(epoch=epoch, optimizer=optimizer, loss=val_loss, fname=self.conf.checkpoint_best)\n val_loss_best = val_loss\n epoch_last_improvement = epoch\n self._save_checkpoint(epoch=epoch, optimizer=optimizer, loss=val_loss, fname=self.conf.checkpoint_latest)\n self.conf.write_training_metadata(train_data)\n\n # Early stopping\n if epoch_last_improvement is not None and self.conf.early_stopping_patience is not None and epoch - epoch_last_improvement >= self.conf.early_stopping_patience:\n logger.info(f\"Stopping early after {epoch - epoch_last_improvement} epochs without improvement in validation loss.\")\n break\n\n\n def _save_checkpoint(self, epoch: int, optimizer: torch.optim.Optimizer, loss: float, fname: str):\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss,\n }, fname)\n\n\n def _compute_loss(self, batch_input: torch.Tensor, batch_pred: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute the loss\n\n Args:\n batch_input (torch.Tensor): Batch input of shape (batch_size, input_length (time), no_features)\n batch_pred (torch.Tensor): Batch prediction of shape (batch_size, prediction_length (time), no_features)\n\n Returns:\n torch.Tensor: Loss (MSE)\n \"\"\" \n\n # Forward pass\n 
batch_pred_hat = self.model(batch_input)\n\n # Compute MSE loss\n loss = torch.nn.functional.mse_loss(batch_pred_hat, batch_pred)\n\n # Normalize the loss by the batch size\n # batch_size = batch_input.size(0)\n # loss /= batch_size\n\n return loss\n\n\n def _train_step(self, batch_input: torch.Tensor, batch_pred: torch.Tensor, optimizer: torch.optim.Optimizer) -> float:\n \"\"\"Training step\n\n Args:\n batch_input (torch.Tensor): Input data of shape (batch_size, input_length (time), no_features)\n batch_pred (torch.Tensor): Prediction data of shape (batch_size, prediction_length (time), no_features)\n optimizer (torch.optim.Optimizer): Optimizer\n\n Returns:\n float: Loss (MSE)\n \"\"\" \n optimizer.zero_grad()\n\n # Train mode\n self.model.train()\n\n # Loss\n loss = self._compute_loss(batch_input, batch_pred)\n\n # Backward pass\n loss.backward()\n\n # Update parameters\n optimizer.step()\n\n return loss.item()"
}
] | from utils import TSMixer, plot_preds, plot_loss, TSMixerConf, TSMixerGridSearch
import argparse
import yaml
import os | 7,246 |
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--command", type=str, required=True, choices=["train", "predict", "loss", "grid-search"], help="Command to run")
parser.add_argument("--conf", type=str, required=False, help="Path to the configuration file")
parser.add_argument("--no-feats-plot", type=int, required=False, default=6, help="Number of features to plot")
parser.add_argument("--show", action="store_true", required=False, help="Show plots")
args = parser.parse_args()
if args.command == "train":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
tsmixer = TSMixer(conf)
# Train
tsmixer.train()
elif args.command == "predict":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
# Load best checkpoint
conf.initialize = TSMixerConf.Initialize.FROM_BEST_CHECKPOINT
tsmixer = TSMixer(conf)
# Predict on validation dataset
data = tsmixer.predict_val_dataset(max_samples=10, save_inputs=False)
# Plot predictions
data_plt = data[0]
assert args.no_feats_plot is not None, "Must provide number of features to plot"
plot_preds(
preds=data_plt.pred,
preds_gt=data_plt.pred_gt,
no_feats_plot=args.no_feats_plot,
show=args.show,
fname_save=os.path.join(conf.image_dir, "preds.png")
)
elif args.command == "loss":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
train_data = conf.load_training_metadata_or_new()
|
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--command", type=str, required=True, choices=["train", "predict", "loss", "grid-search"], help="Command to run")
parser.add_argument("--conf", type=str, required=False, help="Path to the configuration file")
parser.add_argument("--no-feats-plot", type=int, required=False, default=6, help="Number of features to plot")
parser.add_argument("--show", action="store_true", required=False, help="Show plots")
args = parser.parse_args()
if args.command == "train":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
tsmixer = TSMixer(conf)
# Train
tsmixer.train()
elif args.command == "predict":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
# Load best checkpoint
conf.initialize = TSMixerConf.Initialize.FROM_BEST_CHECKPOINT
tsmixer = TSMixer(conf)
# Predict on validation dataset
data = tsmixer.predict_val_dataset(max_samples=10, save_inputs=False)
# Plot predictions
data_plt = data[0]
assert args.no_feats_plot is not None, "Must provide number of features to plot"
plot_preds(
preds=data_plt.pred,
preds_gt=data_plt.pred_gt,
no_feats_plot=args.no_feats_plot,
show=args.show,
fname_save=os.path.join(conf.image_dir, "preds.png")
)
elif args.command == "loss":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
train_data = conf.load_training_metadata_or_new() | plot_loss( | 1 | 2023-11-18 19:56:18+00:00 | 12k |
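The main.py example above drives everything through argparse plus a YAML configuration file; the same flow can be expressed programmatically. A minimal sketch, assuming the repo's `utils` package is importable and using illustrative sizes and paths that are not taken from the original:

from utils import TSMixer, TSMixerConf

conf = TSMixerConf(
    output_dir="output/example-run",        # checkpoints, plots and metadata land here
    input_length=512,                       # time steps fed into the model (illustrative)
    prediction_length=96,                   # time steps to forecast (illustrative)
    no_features=7,                          # number of columns in the CSV (illustrative)
    no_mixer_layers=2,
    data_src=TSMixerConf.DataSrc.CSV_FILE,
    data_src_csv="data/example.csv",        # hypothetical dataset path
    device="cpu",                           # default is "mps"; "cpu" is the safest assumption
)

tsmixer = TSMixer(conf)   # builds the model and applies conf.initialize (from-scratch by default)
tsmixer.train()           # trains, checkpoints best/latest, writes the loss metadata JSON

After training, setting conf.initialize = TSMixerConf.Initialize.FROM_BEST_CHECKPOINT and calling TSMixer(conf).predict_val_dataset(...) reproduces what the `predict` command does in the cropped code above.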
Jisencc/yolov5_dual_weighting | utils/dataloaders.py | [
{
"identifier": "Albumentations",
"path": "utils/augmentations.py",
"snippet": "class Albumentations:\n # YOLOv5 Albumentations class (optional, only used if package is installed)\n def __init__(self, size=640):\n self.transform = None\n prefix = colorstr('albumentations: ')\n try:\n import albumentations as A\n check_version(A.__version__, '1.0.3', hard=True) # version requirement\n\n T = [\n A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0),\n A.Blur(p=0.01),\n A.MedianBlur(p=0.01),\n A.ToGray(p=0.01),\n A.CLAHE(p=0.01),\n A.RandomBrightnessContrast(p=0.0),\n A.RandomGamma(p=0.0),\n A.ImageCompression(quality_lower=75, p=0.0)] # transforms\n self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))\n\n LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))\n except ImportError: # package not installed, skip\n pass\n except Exception as e:\n LOGGER.info(f'{prefix}{e}')\n\n def __call__(self, im, labels, p=1.0):\n if self.transform and random.random() < p:\n new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed\n im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])\n return im, labels"
},
{
"identifier": "augment_hsv",
"path": "utils/augmentations.py",
"snippet": "def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):\n # HSV color-space augmentation\n if hgain or sgain or vgain:\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))\n dtype = im.dtype # uint8\n\n x = np.arange(0, 256, dtype=r.dtype)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed"
},
{
"identifier": "classify_albumentations",
"path": "utils/augmentations.py",
"snippet": "def classify_albumentations(\n augment=True,\n size=224,\n scale=(0.08, 1.0),\n ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33\n hflip=0.5,\n vflip=0.0,\n jitter=0.4,\n mean=IMAGENET_MEAN,\n std=IMAGENET_STD,\n auto_aug=False):\n # YOLOv5 classification Albumentations (optional, only used if package is installed)\n prefix = colorstr('albumentations: ')\n try:\n import albumentations as A\n from albumentations.pytorch import ToTensorV2\n check_version(A.__version__, '1.0.3', hard=True) # version requirement\n if augment: # Resize and crop\n T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]\n if auto_aug:\n # TODO: implement AugMix, AutoAug & RandAug in albumentation\n LOGGER.info(f'{prefix}auto augmentations are currently not supported')\n else:\n if hflip > 0:\n T += [A.HorizontalFlip(p=hflip)]\n if vflip > 0:\n T += [A.VerticalFlip(p=vflip)]\n if jitter > 0:\n color_jitter = (float(jitter), ) * 3 # repeat value for brightness, contrast, satuaration, 0 hue\n T += [A.ColorJitter(*color_jitter, 0)]\n else: # Use fixed crop for eval set (reproducibility)\n T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]\n T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor\n LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))\n return A.Compose(T)\n\n except ImportError: # package not installed, skip\n LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)')\n except Exception as e:\n LOGGER.info(f'{prefix}{e}')"
},
{
"identifier": "classify_transforms",
"path": "utils/augmentations.py",
"snippet": "def classify_transforms(size=224):\n # Transforms to apply if albumentations not installed\n assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'\n # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])\n return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])"
},
{
"identifier": "copy_paste",
"path": "utils/augmentations.py",
"snippet": "def copy_paste(im, labels, segments, p=0.5):\n # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)\n n = len(segments)\n if p and n:\n h, w, c = im.shape # height, width, channels\n im_new = np.zeros(im.shape, np.uint8)\n for j in random.sample(range(n), k=round(p * n)):\n l, s = labels[j], segments[j]\n box = w - l[3], l[2], w - l[1], l[4]\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n if (ioa < 0.30).all(): # allow 30% obscuration of existing labels\n labels = np.concatenate((labels, [[l[0], *box]]), 0)\n segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))\n cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)\n\n result = cv2.flip(im, 1) # augment segments (flip left-right)\n i = cv2.flip(im_new, 1).astype(bool)\n im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug\n\n return im, labels, segments"
},
{
"identifier": "letterbox",
"path": "utils/augmentations.py",
"snippet": "def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):\n # Resize and pad image while meeting stride-multiple constraints\n shape = im.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better val mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return im, ratio, (dw, dh)"
},
{
"identifier": "mixup",
"path": "utils/augmentations.py",
"snippet": "def mixup(im, labels, im2, labels2):\n # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf\n r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0\n im = (im * r + im2 * (1 - r)).astype(np.uint8)\n labels = np.concatenate((labels, labels2), 0)\n return im, labels"
},
{
"identifier": "random_perspective",
"path": "utils/augmentations.py",
"snippet": "def random_perspective(im,\n targets=(),\n segments=(),\n degrees=10,\n translate=.1,\n scale=.1,\n shear=10,\n perspective=0.0,\n border=(0, 0)):\n # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))\n # targets = [cls, xyxy]\n\n height = im.shape[0] + border[0] * 2 # shape(h,w,c)\n width = im.shape[1] + border[1] * 2\n\n # Center\n C = np.eye(3)\n C[0, 2] = -im.shape[1] / 2 # x translation (pixels)\n C[1, 2] = -im.shape[0] / 2 # y translation (pixels)\n\n # Perspective\n P = np.eye(3)\n P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)\n P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.uniform(-degrees, degrees)\n # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations\n s = random.uniform(1 - scale, 1 + scale)\n # s = 2 ** random.uniform(-scale, scale)\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)\n T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)\n\n # Combined rotation matrix\n M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT\n if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed\n if perspective:\n im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))\n else: # affine\n im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n # Visualize\n # import matplotlib.pyplot as plt\n # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()\n # ax[0].imshow(im[:, :, ::-1]) # base\n # ax[1].imshow(im2[:, :, ::-1]) # warped\n\n # Transform label coordinates\n n = len(targets)\n if n:\n use_segments = any(x.any() for x in segments) and len(segments) == n\n new = np.zeros((n, 4))\n if use_segments: # warp segments\n segments = resample_segments(segments) # upsample\n for i, segment in enumerate(segments):\n xy = np.ones((len(segment), 3))\n xy[:, :2] = segment\n xy = xy @ M.T # transform\n xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine\n\n # clip\n new[i] = segment2box(xy, width, height)\n\n else: # warp boxes\n xy = np.ones((n * 4, 3))\n xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy = xy @ M.T # transform\n xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine\n\n # create new boxes\n x = xy[:, [0, 2, 4, 6]]\n y = xy[:, [1, 3, 5, 7]]\n new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # clip\n new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)\n new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)\n\n # filter candidates\n i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)\n targets = targets[i]\n targets[:, 1:5] = new[i]\n\n return im, targets"
},
{
"identifier": "DATASETS_DIR",
"path": "utils/general.py",
"snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\nNUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nTQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\nLOGGING_NAME = 'yolov5'\nLOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef is_colab():\ndef is_jupyter():\ndef is_kaggle():\ndef is_docker() -> bool:\ndef is_writeable(dir, test=False):\ndef set_logging(name=LOGGING_NAME, verbose=True):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def time(self):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef methods(instance):\ndef print_args(args: Optional[dict] = None, show_file=True, show_func=False):\ndef init_seeds(seed=0, deterministic=False):\ndef intersect_dicts(da, db, exclude=()):\ndef get_default_args(func):\ndef get_latest_run(search_dir='.'):\ndef file_age(path=__file__):\ndef file_date(path=__file__):\ndef file_size(path):\ndef check_online():\n def run_once():\ndef git_describe(path=ROOT): # path must be a directory\ndef check_git_status(repo='ultralytics/yolov5', branch='master'):\ndef check_git_info(path='.'):\ndef check_python(minimum='3.8.0'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow(warn=False):\ndef check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_font(font=FONT, progress=False):\ndef check_dataset(data, autodownload=True):\ndef check_amp(model):\n def amp_allclose(model, im):\ndef yaml_load(file='data.yaml'):\ndef yaml_save(file='data.yaml', data={}):\ndef unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_boxes(img1_shape, boxes, img0_shape, 
ratio_pad=None):\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\ndef clip_boxes(boxes, shape):\ndef clip_segments(segments, shape):\ndef non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\ndef imread(filename, flags=cv2.IMREAD_COLOR):\ndef imwrite(filename, img):\ndef imshow(path, im):\nclass Profile(contextlib.ContextDecorator):\nclass Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):"
},
{
"identifier": "torch_distributed_zero_first",
"path": "utils/torch_utils.py",
"snippet": "@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n # Decorator to make all processes in distributed training wait for each local_master to do something\n if local_rank not in [-1, 0]:\n dist.barrier(device_ids=[local_rank])\n yield\n if local_rank == 0:\n dist.barrier(device_ids=[0])"
}
] | import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import yaml
import mss
import pafy
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
letterbox, mixup, random_perspective)
from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements,
check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy,
xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first | 8,344 | im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
self.frame += 1
return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line
path = Path(path).read_text().rsplit()
files = []
for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
p = str(Path(p).resolve())
if '*' in p:
files.extend(sorted(glob.glob(p, recursive=True))) # glob
elif os.path.isdir(p):
files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir
elif os.path.isfile(p):
files.append(p) # files
else:
raise FileNotFoundError(f'{p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
self.transforms = transforms # optional
self.vid_stride = vid_stride # video frame-rate stride
if any(videos):
self._new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
for _ in range(self.vid_stride):
self.cap.grab()
ret_val, im0 = self.cap.retrieve()
while not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
path = self.files[self.count]
self._new_video(path)
ret_val, im0 = self.cap.read()
self.frame += 1
# im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
im0 = cv2.imread(path) # BGR
assert im0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
if self.transforms:
im = self.transforms(im0) # transforms
else:
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
return path, im, im0, self.cap, s
def _new_video(self, path):
# Create a new video capture object
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees
# self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493
def _cv2_rotate(self, im):
# Rotate a cv2 video manually
if self.orientation == 0:
return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
elif self.orientation == 180:
return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
elif self.orientation == 90:
return cv2.rotate(im, cv2.ROTATE_180)
return im
def __len__(self):
return self.nf # number of files
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
torch.backends.cudnn.benchmark = True # faster for fixed-size inference
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
self.vid_stride = vid_stride # video frame-rate stride
sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
n = len(sources)
| # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Dataloaders and dataset utils
"""
# Parameters
HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data'
IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.sha256(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
with contextlib.suppress(Exception):
rotation = dict(img._getexif().items())[orientation]
if rotation in [6, 8]: # rotation 270 or 90
s = (s[1], s[0])
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info['exif'] = exif.tobytes()
return image
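# A minimal usage sketch for exif_transpose (illustrative only; the file name
# below is a placeholder). It shows the intended call order: open the image,
# normalise its orientation, then pass it on to a loader or transform.
def _demo_exif_transpose(path='photo_from_phone.jpg'):
    im = Image.open(path)
    size_on_disk = im.size                 # (width, height) as stored
    im = exif_transpose(im)                # rotate/flip pixels, drop the tag
    return size_on_disk, im.size           # sizes swap for 90/270 degree tags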
def seed_worker(worker_id):
# Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
def create_dataloader(path,
imgsz,
batch_size,
stride,
single_cls=False,
hyp=None,
augment=False,
cache=False,
pad=0.0,
rect=False,
rank=-1,
workers=8,
image_weights=False,
quad=False,
prefix='',
shuffle=False,
seed=0):
if rect and shuffle:
LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
shuffle = False
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = LoadImagesAndLabels(
path,
imgsz,
batch_size,
augment=augment, # augmentation
hyp=hyp, # hyperparameters
rect=rect, # rectangular batches
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nd = torch.cuda.device_count() # number of CUDA devices
nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
generator = torch.Generator()
generator.manual_seed(6148914691236517205 + seed + RANK)
return loader(dataset,
batch_size=batch_size,
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
pin_memory=PIN_MEMORY,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,
worker_init_fn=seed_worker,
generator=generator), dataset
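# Illustrative call to create_dataloader; the dataset path is a placeholder and
# the (im, targets, paths, shapes) batch layout assumed below is the usual
# collate_fn output, not something this sketch guarantees.
def _demo_create_dataloader():
    loader, dataset = create_dataloader(
        path='datasets/coco128/images/train2017',  # hypothetical path
        imgsz=640,
        batch_size=16,
        stride=32,
        hyp=None,        # no augmentation hyperparameters
        augment=False,
        rank=-1,         # single-process (non-DDP) run
        workers=4,
        shuffle=True)
    im, targets, paths, shapes = next(iter(loader))
    return im.shape, len(dataset)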
class InfiniteDataLoader(dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for _ in range(len(self)):
yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
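# Small sketch contrasting the two helpers above, assuming `ds` is any
# map-style Dataset. Because _RepeatSampler never raises StopIteration,
# InfiniteDataLoader keeps its worker processes alive between epochs instead of
# respawning them, while still yielding len(loader) batches per epoch.
def _demo_infinite_dataloader(ds, epochs=2):
    loader = InfiniteDataLoader(ds, batch_size=4, num_workers=2, shuffle=True)
    for _ in range(epochs):
        for _batch in loader:   # second epoch reuses the same worker pool
            pass
    return len(loader)          # number of batches per epoch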
class LoadScreenshots:
# YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):
# source = [screen_number left top width height] (pixels)
check_requirements('mss')
source, *params = source.split()
self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0
if len(params) == 1:
self.screen = int(params[0])
elif len(params) == 4:
left, top, width, height = (int(x) for x in params)
elif len(params) == 5:
self.screen, left, top, width, height = (int(x) for x in params)
self.img_size = img_size
self.stride = stride
self.transforms = transforms
self.auto = auto
self.mode = 'stream'
self.frame = 0
self.sct = mss.mss()
# Parse monitor shape
monitor = self.sct.monitors[self.screen]
self.top = monitor['top'] if top is None else (monitor['top'] + top)
self.left = monitor['left'] if left is None else (monitor['left'] + left)
self.width = width or monitor['width']
self.height = height or monitor['height']
self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}
def __iter__(self):
return self
def __next__(self):
# mss screen capture: get raw pixels from the screen as np array
im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR
s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: '
if self.transforms:
im = self.transforms(im0) # transforms
else:
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
self.frame += 1
return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s
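# Usage sketch for LoadScreenshots (requires the mss package). The source
# string mirrors the CLI form `--source "screen 0 100 100 512 256"`: screen 0,
# a 512x256 crop whose top-left corner is at (100, 100). The stream is endless,
# so the loop stops itself after a few frames.
def _demo_load_screenshots(max_frames=3):
    ds = LoadScreenshots('screen 0 100 100 512 256', img_size=640, stride=32)
    for i, (screen, im, im0, _, s) in enumerate(ds):
        if i + 1 >= max_frames:
            break
    return im.shape, im0.shape   # padded CHW array vs raw BGR capture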
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line
path = Path(path).read_text().rsplit()
files = []
for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
p = str(Path(p).resolve())
if '*' in p:
files.extend(sorted(glob.glob(p, recursive=True))) # glob
elif os.path.isdir(p):
files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir
elif os.path.isfile(p):
files.append(p) # files
else:
raise FileNotFoundError(f'{p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
self.transforms = transforms # optional
self.vid_stride = vid_stride # video frame-rate stride
if any(videos):
self._new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
for _ in range(self.vid_stride):
self.cap.grab()
ret_val, im0 = self.cap.retrieve()
while not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
path = self.files[self.count]
self._new_video(path)
ret_val, im0 = self.cap.read()
self.frame += 1
# im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
im0 = cv2.imread(path) # BGR
assert im0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
if self.transforms:
im = self.transforms(im0) # transforms
else:
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
return path, im, im0, self.cap, s
def _new_video(self, path):
# Create a new video capture object
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees
# self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493
def _cv2_rotate(self, im):
# Rotate a cv2 video manually
if self.orientation == 0:
return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
elif self.orientation == 180:
return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
elif self.orientation == 90:
return cv2.rotate(im, cv2.ROTATE_180)
return im
def __len__(self):
return self.nf # number of files
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
torch.backends.cudnn.benchmark = True # faster for fixed-size inference
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
self.vid_stride = vid_stride # video frame-rate stride
sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
n = len(sources) | self.sources = [clean_str(x) for x in sources] # clean source names for later | 8 | 2023-11-12 13:28:26+00:00 | 12k |
BSoD123456/ffta_us_cn | ffta_modifier.py | [
{
"identifier": "load_rom",
"path": "ffta_sect.py",
"snippet": "def report(*args):\ndef readval_le(raw, offset, size, signed):\ndef writeval_le(val, dst, offset, size):\ndef rvs_endian(src, size, dst_signed):\n def __init__(self, raw, offset):\n def raw(self):\n def mod(self):\n def buf_offset(self):\n def buf_real_offset(self):\n def real_offset(self):\n def accessable_top(self):\n def shift(self, offs):\n def extendto(self, cnt):\n def readval(self, pos, cnt, signed):\n def writeval(self, val, pos, cnt):\n def fill(self, val, pos, cnt):\n def findval(self, val, pos, cnt, signed):\n def forval(self, cb, pos, cnt, signed):\n def BYTES(self, pos, cnt):\n def WBYTES(self, dst, pos):\n def STR(self, pos, cnt, codec = 'utf8'):\n def WSTR(self, dst, pos, codec = 'utf8'):\n def BYTESN(self, pos):\n def STRN(self, pos, codec = 'utf8'):\n def FBYTES(self, dst, pos, stp = 1):\n def FSTR(self, dst, pos, stp = 1, codec = 'utf8'):\n def sub(self, pos, length = None, cls = None):\n def concat(self, dst, pos = None):\n def parse(self):\n def parse_size(self, top_ofs, top_align_width):\n def sect_align(self):\n def sect_top_align(self):\n def sect_top(self):\n def sect_top_least(self):\n def sect_top_nondeterm(self):\n def set_nondeterm(self):\n def set_real_top(self, real_top):\n def in_sect(self, offs):\n def _offs2addr(self, offs):\n def _addr2offs(self, addr):\n def aot(self, v, typ):\n def rdptr(self, ptr, typ = 'oao'):\n def repack_copy(self, sect_top = None):\n def realign(self, align, base):\n def _repack_end(self, rmk):\n def _repack_with(self, tab, can_extend = False):\n def repack_with(self, tab, *args, **kargs):\n def sect_align(self):\n def has_tsize(self):\n def tab_top(self):\n def parse_size(self, top_ofs, top_align_width):\n def tbase(self, idx):\ndef tabitm(ofs = 0, part = None):\n def _mod(mth):\n def _wrap(self, idx, *args, **kargs):\ndef tabkey(key):\n def _mod(cls):\n def _getkey(self, k):\n def _TAB_REF_CLS():\n def get_entry(self, ofs):\n def last_idx(self):\n def last_idxs(self):\n def _is_last(self, idx):\n def _ref_top_nondeterm(self, idx):\n def _init_ref(self, sub, idx, ofs):\n def sect_top(self):\n def refresh_sect_top(self):\n def sect_top_least(self):\n def get_ref(self, idx):\n def _tab_acs_top(self):\n def _guess_size(self, top_ofs, upd_sz):\n def parse_size(self, top_ofs, top_align_width):\n def _iter_item(self, path, skiprep, refresh):\n def iter_item(self, skiprep = False, refresh = False):\n def _repack_content(self, tab, base):\n def _repack_with(self, tab):\n def set_sub_offset(self, ofs):\n def get_entry(self, idx):\n def _repack_end(self, rmk, base):\n def _repack_with(self, tab, base):\n def set_info(self, host, tlen, hole_idxs = None, ignore_invalid_ptr = False):\n def _ref_top_nondeterm(self, idx):\n def _tab_acs_top(self):\n def get_entry(self, idx):\n def get_ref(self, idx):\n def parse_size(self, top_ofs, top_align_width):\n def _repack_end(self, rmk, base):\n def _repack_with(self, tab, base):\ndef meta_c_ffta_sect_tab_flex(ent_fmt, alt_ptrs = None):\n def set_info(self, tlen):\n def get_entry(self, ofs):\n def _repack_with(self, tab):\n def get_entry(self, ofs):\n def iter_lines(self):\n def _guess_size(self):\n def parse_size(self, top_ofs, top_align_width):\n def _repack_with(self, tab):\n def _TAB_REF_CLS():\n def _TAB_REF_CLS():\n def _TAB_REF_CLS():\n def _TAB_REF_CLS():\n def parse(self):\n def _get_prms(self, cofs, clen):\n def _get_cmd(self, idx):\n def get_cmd(self, ofs):\n def _extend_line(self):\n def prmsfunc(n):\n def iter_lines_to(self, ofs):\n def _repack_with(self, tab):\n def 
get_cmd_addr(self, ofs):\n def get_cmd_len(self, ofs):\n def _TAB_REF_CLS():\n def _TAB_REF_CLS():\n def _TAB_REF_CLS():\n def _TAB_REF_CLS():\n def _TAB_REF_CLS():\n def _gc(self, si):\n def _bypass(self, si, di, d, l):\n def _flip(di, d, l, f):\n def _bset(di, d, l, v):\n def _decompress(self, dst, src_idx, dst_len):\n def parse(self):\n def _repack_with(self, toks):\n def parse(self):\n def _make_ctr_tab(self):\n def _gc(self):\n def _bc(self):\n def _directly_mode(self, n):\n def _get_ctr_04(self):\n def _get_ctr_32(self):\n def _get_tok(self):\n def _decode(self):\n def _encode_tok(self, buf, tok):\n def _encode(self, toks):\n def _repack_with(self, toks):\n def _get_ctr_03(self):\n def _get_ctr_04(self):\n def _get_tok(self):\n def set_info(self, info):\n def _get_bits(self, ofs, bidx, blen, rvs, cch):\n def gen_char(self, ofs):\n def _rowgen():\n def _repack_char(self, fdat):\n def _gen(d):\n def _repack_with(self, finfo):\n def setup(self, tabs_info, rom_ed = None):\n def patch(self, patchs):\n def set_info(self, tabs_info):\n def _subsect(self, offs, c_sect, pargs, nondet = False):\n def subsect(self, offs, c_sect, *pargs, **kargs):\n def _subsect_ptr(self, offs_ptr, c_sect, pargs):\n def _add_tabs(self, tabs_info):\n def _replace_ptrs(self, rplc_ptrs):\n def _repack_end(self, rmk):\n def _repack_with(self, tabs):\ndef _pages_sect_info(info):\ndef _words_sect_info(info):\ndef _trim_raw_len(raw, st):\ndef load_rom_us(fn):\ndef load_rom_jp(fn):\ndef load_rom_cn(fn):\ndef main():\n def enum_text():\n def enum_script(fat_idx, mofs = 0x3f):\n def enum_b_script(p_idx, mofs = 0x3f):\n def t01():\nINF = float('inf')\n I8 = lambda self, pos: self.readval(pos, 1, True)\n U8 = lambda self, pos: self.readval(pos, 1, False)\n I16 = lambda self, pos: self.readval(pos, 2, True)\n U16 = lambda self, pos: self.readval(pos, 2, False)\n I32 = lambda self, pos: self.readval(pos, 4, True)\n U32 = lambda self, pos: self.readval(pos, 4, False)\n I64 = lambda self, pos: self.readval(pos, 8, True)\n U64 = lambda self, pos: self.readval(pos, 8, False)\n W8 = lambda self, val, pos: self.writeval(val, pos, 1)\n W16 = lambda self, val, pos: self.writeval(val, pos, 2)\n W32 = lambda self, val, pos: self.writeval(val, pos, 4)\n W64 = lambda self, val, pos: self.writeval(val, pos, 8)\n FI8 = lambda self, val, pos: self.findval(val, pos, 1, True)\n FU8 = lambda self, val, pos: self.findval(val, pos, 1, False)\n FI16 = lambda self, val, pos: self.findval(val, pos, 2, True)\n FU16 = lambda self, val, pos: self.findval(val, pos, 2, False)\n FI32 = lambda self, val, pos: self.findval(val, pos, 4, True)\n FU32 = lambda self, val, pos: self.findval(val, pos, 4, False)\n FI64 = lambda self, val, pos: self.findval(val, pos, 8, True)\n FU64 = lambda self, val, pos: self.findval(val, pos, 8, False)\n FORI8 = lambda self, cb, pos: self.forval(cb, pos, 1, True)\n FORU8 = lambda self, cb, pos: self.forval(cb, pos, 1, False)\n FORI16 = lambda self, cb, pos: self.forval(cb, pos, 2, True)\n FORU16 = lambda self, cb, pos: self.forval(cb, pos, 2, False)\n FORI32 = lambda self, cb, pos: self.forval(cb, pos, 4, True)\n FORU32 = lambda self, cb, pos: self.forval(cb, pos, 4, False)\n FORI64 = lambda self, cb, pos: self.forval(cb, pos, 8, True)\n FORU64 = lambda self, cb, pos: self.forval(cb, pos, 8, False)\n ADDR_BASE = 0x8000000\n _SECT_ALIGN = 1\n _SECT_TOP_ALIGN = 1\n _TAB_WIDTH = 1\n _TAB_ALIGN = INF\n _TAB_WIDTH = 4\n _TAB_WIDTH = sum(v[1] for v in ent_fmt)\n _TAB_ALT_PTRS = tuple(alt_ptrs) if alt_ptrs else None\n _TAB_WIDTH = 
4\n _TAB_WIDTH = 4\n _TAB_WIDTH = 4\n _TAB_WIDTH = 2\n _TAB_WIDTH = 2\n _TAB_WIDTH = 6\n _TAB_WIDTH = 4\n _TAB_WIDTH = 4\n _TAB_WIDTH = 2\n _SECT_TOP_ALIGN = 4\n _SECT_ALIGN = 2\n _CTR_TOKBASE = 0x21\n _CTR_TOKLEN = [\n # read 2\n [0x40, 0x41, 0x42, 0x4a, 0x4d, 0x4f, 0x52, 0x54, 0x56, 0x57, 0x58],\n # read 3\n [0x00, 0x1b, 0x1d, 0x46, 0x4b, 0x51, 0x53, 0x32, 0x04],\n # read 4\n [0x45],\n ]\n _CTR_TOKSPEC = [0x32, 0x04]\n _CTR_TRANS = [\n 0x4, 0x46, 0x4b, 0x45,\n 0x45, None, 0x53, 0x51,\n 0x4d, 0x52, 0x42, 0x57,\n 0x54, 0x40, 0x4f, 0x41,\n 0x56, 0x4a, None, 0x1b,\n 0x1d, 0x58, 0x0\n ]\n _CTR_TOKSPEC = [0x3, 0x4]\n ARG_SELF = c_symb()\nclass c_mark:\nclass c_ffta_sect(c_mark):\nclass c_ffta_sect_tab(c_ffta_sect):\nclass c_ffta_sect_tab_ref(c_ffta_sect_tab):\nclass c_ffta_sect_tab_ref_sub(c_ffta_sect_tab_ref):\nclass c_ffta_sect_tab_ref_addr(c_ffta_sect_tab_ref):\n class c_ffta_sect_tab_flex(c_ffta_sect_tab):\nclass c_ffta_sect_scene_fat(c_ffta_sect_tab):\nclass c_ffta_sect_scene_script(c_ffta_sect_tab_ref):\nclass c_ffta_sect_scene_script_group(c_ffta_sect_tab_ref):\nclass c_ffta_sect_battle_script(c_ffta_sect_tab_ref):\nclass c_ffta_sect_battle_script_group(c_ffta_sect_tab_ref):\nclass c_ffta_sect_script_page(c_ffta_sect):\nclass c_ffta_sect_script_cmds(c_ffta_sect_tab):\nclass c_ffta_sect_text(c_ffta_sect_tab_ref):\nclass c_ffta_sect_text_sub(c_ffta_sect_tab_ref_sub):\nclass c_ffta_sect_fixed_text(c_ffta_sect_tab_ref_addr):\nclass c_ffta_sect_words_text(c_ffta_sect_tab_ref_addr):\nclass c_ffta_sect_text_page(c_ffta_sect_tab_ref):\nclass c_ffta_sect_text_line(c_ffta_sect):\nclass c_ffta_sect_text_buf(c_ffta_sect):\nclass c_ffta_sect_text_buf_ya(c_ffta_sect_text_buf):\nclass c_ffta_sect_font(c_ffta_sect_tab):\nclass c_ffta_sect_rom(c_ffta_sect):"
},
{
"identifier": "c_ffta_charset_ocr",
"path": "ffta_charset.py",
"snippet": "class c_ffta_charset_ocr(c_ffta_charset_base):\n\n def __init__(self, path, rom):\n super().__init__(path)\n self.rom = rom\n\n def reset(self):\n self.ocr()\n\n def ocr(self):\n from ffta_font import c_ffta_font_drawer\n from ffta_ocr import c_ffta_ocr_parser, iter_toks\n if self.rom:\n dr = c_ffta_font_drawer(self.rom.tabs['font'])\n ocr = c_ffta_ocr_parser(iter_toks(self.rom), dr)\n ocr.parse()\n ocr.feed_all()\n else:\n ocr = c_ffta_ocr_parser(None, None)\n ocr.parse(noambi = True)\n self.chst, self.chst_r = ocr.final_charset()\n self.save()"
},
{
"identifier": "c_ffta_charset_dynamic",
"path": "ffta_charset.py",
"snippet": "class c_ffta_charset_dynamic(c_ffta_charset_base):\n\n def __init__(self, path, nosave = False):\n super().__init__(path)\n self.nosave = nosave\n\n def load(self, src, base):\n self.base_char = base\n if super().load():\n mxc = 0\n for c in src.chst:\n if c > mxc:\n mxc = c\n self.chst_dyidx = mxc + 1\n else:\n for c, ch in src.chst.items():\n if c < base:\n self.chst[c] = ch\n for ch, c in src.chst_r.items():\n if c < base:\n self.chst_r[ch] = c\n self.chst_dyidx = base\n self.dirty = False\n\n def iter_dychrs(self):\n if self.dirty and not self.nosave:\n self.save()\n for c in range(self.base_char, self.chst_dyidx):\n yield c, self.chst[c]\n\n def _encode_char(self, char):\n ch = super()._encode_char(char)\n if ch is None:\n ch = self.chst_dyidx\n self.chst_dyidx += 1\n #report('debug', f'record new char {char} to 0x{ch:x}')\n self.chst_r[char] = ch\n self.chst[ch] = char\n self.dirty = True\n return ch"
},
{
"identifier": "make_ffta_font_gen",
"path": "ffta_font_generator.py",
"snippet": "def make_ffta_font_gen(name, size, offset):\n return c_font_gen(\n name, size, [\n # outline\n {\n (-1, -1): 1,\n ( 0, -1): 1,\n ( 1, -1): 1,\n (-1, 0): 1,\n ( 1, 0): 1,\n (-1, 1): 1,\n ( 0, 1): 1,\n ( 1, 1): 1,\n },\n # shadow\n {\n ( 1, 0): 2,\n ( 1, 1): 2,\n },\n # center\n {\n ( 0, 0): 3,\n },\n ],\n (8, 16, 2),\n offset,\n )"
},
{
"identifier": "make_script_parser",
"path": "ffta_parser.py",
"snippet": "def make_script_parser(rom, typ):\n if typ == 'scene':\n return c_ffta_scene_script_parser({\n 'fat': rom.tabs['s_fat'],\n 'script': rom.tabs['s_scrpt'],\n 'cmds': rom.tabs['s_cmds'],\n 'text': rom.tabs['s_text'],\n 'fx_text': rom.tabs['fx_text'],\n })\n elif typ == 'battle':\n return c_ffta_battle_script_parser({\n 'script': rom.tabs['b_scrpt'],\n 'cmds': rom.tabs['b_cmds'],\n })\n else:\n raise ValueError(f'invalid script type {typ}')"
},
{
"identifier": "collect_text_cmds",
"path": "ffta_parser.py",
"snippet": "def collect_text_cmds(psr, dtab = None, gen_sc61 = True):\n if not isinstance(psr, c_ffta_scene_script_parser):\n return None\n def _pck_rslt(rslt):\n if (rslt['win_type'] != 'normal' or\n rslt.get('txt_shared', False) or\n rslt.get('txt_invalid', False) ):\n return None\n return rslt['output'][1:]\n rtxts = []\n sc61 = None\n for prog in psr.iter_program():\n if prog.text_idx == 61:\n # with unsolved dynamic sub page\n # just ignore\n sc61 = prog.page_idx\n continue\n page = []\n txt_1st = True\n for cprms in prog.exec(\n cb_pck = _pck_rslt,\n flt = ['text']):\n sidxp = (prog.text_idx, cprms[0])\n sidxr = '/'.join(str(i) for i in sidxp)\n if not dtab is None and not dtab.get(sidxr, None):\n continue\n if txt_1st:\n page.append((*cprms, prog.page_idx))\n txt_1st = False\n else:\n page.append(cprms)\n if page:\n rtxts.append(page)\n if sc61 is None or not gen_sc61 or dtab is None:\n return rtxts\n page = []\n lst_rsubidx = None\n txt_1st = True\n for idxr, (sval, dval) in dtab.items():\n idxp = tuple(int(i) for i in idxr.split('/'))\n if idxp[0] != 61:\n continue\n stidx = idxp[2]\n subidx = idxp[1]\n assert subidx < 25\n rsubidx = stidx // 24 + 1 + 10 * subidx\n if rsubidx == lst_rsubidx:\n pass#subidx = 0\n else:\n lst_rsubidx = rsubidx\n if page:\n rtxts.append(page)\n page = []\n txt_1st = True\n if txt_1st:\n rsc = sc61\n txt_1st = False\n else:\n rsc = 0\n page.append((stidx, 0x1, 0x80, rsc, subidx))\n if page:\n rtxts.append(page)\n return rtxts"
}
] | import json, re
import os, os.path, shutil
import pdb
from ffta_sect import load_rom
from ffta_charset import c_ffta_charset_ocr, c_ffta_charset_dynamic
from ffta_font_generator import make_ffta_font_gen
from ffta_parser import make_script_parser, collect_text_cmds
from hexdump import hexdump as hd
from pprint import pprint | 7,522 | assert vd > vs
return sl
sl += 1
def _next_sinfo(self, si, sinfo):
itr, idxp, (vidxp, val) = sinfo
try:
nvidxp, nval = next(itr)
except StopIteration:
infi = (INF,)
sinfo[1] = infi
sinfo[2] = (infi, None)
return
sinfo[2] = (nvidxp, nval)
if si >= len(self.trmpg):
sinfo[1] = nvidxp
return
tpgs = self.trmpg[si]
cpg = None
for i in range(len(nvidxp), -1, -1):
pg = nvidxp[:i]
if pg in tpgs:
cpg = pg
break
if cpg is None:
sinfo[1] = nvidxp
return
sl = self._sublen_idx(nvidxp, vidxp)
if sl < len(cpg):
sinfo[1] = cpg
return
ridxp = []
for i in range(len(nvidxp)):
v = self._getidxv(idxp, i)
if i > sl:
v = 0
elif i == sl:
v += 1
ridxp.append(v)
sinfo[1] = tuple(ridxp)
def _next(self):
mnidxp = None
cidxps = []
for si, (itr, idxp, _) in enumerate(self.stats):
cidxp = self._calc_cidx(idxp, si)
cidxps.append(cidxp)
if mnidxp is None or self._cmp_idx(cidxp, mnidxp) < 0:
mnidxp = cidxp
if mnidxp and mnidxp[0] == INF:
return None, True
rs = []
for si, (sinfo, cidxp) in enumerate(zip(self.stats, cidxps)):
itr, idxp, (vidxp, val) = sinfo
if self._cmp_idx(cidxp, mnidxp) == 0:
rs.append((vidxp, val))
self._next_sinfo(si, sinfo)
else:
rs.append((vidxp, None))
return rs, False
def iter(self):
self.reset()
while True:
rs, is_done = self._next()
if is_done:
return
yield tuple(rs)
class c_ffta_modifier:
def __init__(self, conf):
self.conf = conf
def load(self):
self.srom = {}
self.chst = {}
for nm, rconf in self.conf['roms']['src'].items():
rom, chst = self.load_rom(rconf)
self.srom[nm] = rom
self.chst[nm] = chst
self.fntgen, self.chst['font'] = self.load_font()
self.txts = self.load_texts()
def export(self):
rmk = None
sben = self.conf.get('sandbox', {}).get('enable', False)
sbon = self.conf.get('sandbox', {}).get('only', False)
if not sben or not sbon:
rmk = self.export_rom(self.conf['roms']['dst']['rels'])
if sben:
sbrmk = self.export_rom(self.conf['roms']['dst']['sndbx'], as_sndbx = True)
if rmk is None:
rmk = sbrmk
return rmk
def export_rom(self, rom_conf, *args, **kargs):
rmk = self.repack(*args, **kargs)
if not rmk:
report('warning', f'something wrong while repacking')
return
self.save_rom(rom_conf['path'], rmk)
return rmk
def load_rom(self, rom_conf):
lfunc = load_rom[rom_conf['type']]
rom = lfunc(rom_conf['path'])
if 'charset' in rom_conf:
if 'charset_ocr' in rom_conf and rom_conf['charset_ocr']:
chstrom = rom
else:
chstrom = None
chst = c_ffta_charset_ocr(rom_conf['charset'], chstrom)
chst.load()
else:
chst = None
return rom, chst
def load_font(self):
conf = self.conf['font']
| #! python3
# coding: utf-8
CONF = {
'roms': {
'src': {
'base': {
'path': r'roms\fftaus.gba',
'type': 'us',
'charset': 'charset_us.json',
},
'text': {
'path': r'roms\fftacnfx.gba',
'type': 'cn',
'charset': 'charset_cn.json',
'charset_ocr': True,
},
},
'dst': {
'rels': {
'path': r'roms\fftauscn.gba',
},
'sndbx': {
'path': r'roms\fftauscn_sndbx.gba',
},
},
},
'work': {
'text': {
'raw': {
# comparison
'comp': 'raw_txt_comp_wk.json',
# uncovered
'uncv': 'raw_txt_uncv_wk.json',
},
'src': {
# base rom
'base': 'src_txt_base_wk.json',
# text rom
'text': 'src_txt_text_wk.json',
},
'mod': {
# translate
'trans': 'trans_txt.json',
},
'fix': {
# fix cn text
'fcomp': 'trans_fix_txt.json',
},
},
},
'font': {
# BoutiqueBitmap9x9 bitmap font (精品点阵体9×9)
# from https://github.com/scott0107000/BoutiqueBitmap9x9
'src': 'font/BoutiqueBitmap9x9_1.7.ttf',
'size': 10,
'offset': (0, 1),
'charset': 'charset_uscn_wk.json',
'charset_nosave': True,
# only hanzi
'dybase': 0x122,
},
'text': {
'skip': {
'@[40]@[42]',
'@[42]',
'dummy@[40]@[42]',
'dummy@[42]',
'dummy',
'Dummy',
},
'skipf': [],
'skipf_defer': [],
'modf': [],
'align': {
's_text': [
((36,), (35,)),
((60,), (60,)),
],
'pages:battle': [
((54,), (51,)),
],
'pages:quest/': [
((1, 0), (0, 200)),
],
'fx_text': [
((8, 60), (8, 58)),
((8, 61), (8, 60)),
((8, 62), (8, 62)),
((25,), (24,)),
],
'words:refer': [
((107,), (104,)),
],
'words:rumor': [
((62,), (61,)),
((63,), (63,)),
],
'words:battle': [
((179,), (176,)),
((543,), (531,)),
],
},
'trim': {
's_text': [{
(61,),
}, {
(61,),
}],
},
},
'sandbox': {
'enable': False,
'only': True,
'scene': {
'boot': 2,
},
'script': {
'__arg_scene_text_start': None,
'scene': lambda txts: (lambda c, f: {
2: {
0: [
#0x19, 0xdc, 0xa,
*c('''
<C: 05>
<48: 00>
<1F: 0F 43 00 20 00>
<29: 0F 00>
<27: 0F 07 00 00>
#<1F: 11 87 02 20 00>
#<24: 11>
#<2E: 11 00 00 08 00 00 00>
#<1F: 10 87 02 20 00>
#<24: 10>
#<2E: 10 00 00 08 00 00 00>
<4B: 14>
<4D: 64 01 00 01>
<47: 90 00 E8 00 01 00>
<3F: 87 77 00>
<3F: 43 53 01>
<3F: 53 54 02>
<3F: 77 57 03>
<3F: 57 56 04>
<3F: 87 67 05>
<3F: 67 66 06>
<3F: 87 57 07>
<3F: 56 46 08>
<3F: 46 45 09>
<3F: 57 47 0A>
<3F: 47 46 0B>
<3F: 66 65 0C>
'''),
*f['fade'](False),
*f['wait'](60),
0x19, 'start',
*c('''
flip:
<27: 0F 06 00 00>
<02: >
'''),
'start:',
*f['move'](0xf, 5, 4, 3),
*f['face'](0xf, 0),
0x27, 0xF, 0x16, 0, 0,
0x12, 0xaf,
0x71, 0x3, 0x0,
0x19, 'skip2',
*c('''
<1F: 4E 56 00 20 00>
<27: 4E 05 00 00>
'''),
*[
v
for tab in txts
for v in [
*c('''
<27: 0F 06 00 00>
<27: 0F 06 00 00>
'''),
*[
v
for targs in tab
for v in [
0x1, 'flip',
*(
f['text_full'](*targs) if targs[1] == 0xf else
f['text_full'](*targs, chlp=0x63, chld=3)
),
]
],
0x12, 0xb0,
0x71, 0x3, 0x0,
0x19, 'skip',
]
],
'skip:',
0x27, 0x4E, 6, 0, 0,
'skip2:',
*f['move'](0xf, 6, 4, 3),
*f['move'](0xf, 7, 8, 0),
*f['wait'](30),
*f['text_full'](171, 0xf, 0x80, 2),
*f['face'](0xf, 3),
*f['text_full'](172, 0xf, 0x80),
*f['text_full'](173, 0xf, 0x80),
*f['face'](0xf, 0),
*f['fade'](True),
*f['setflag'](0x301),
*f['done'](5),
],
},
})(lambda s: [
int(v, 16) if len(v) <= 2 else v
for r in [rr.split('#')[0].strip() for rr in s.splitlines()] if r
for v in (r[1:-1].replace(':', '').split() if r.startswith('<') else [r])
], {
'wait': lambda frms: [
0x15, frms,
],
'move': lambda ch, x, y, d, spd=1: [
0x3e, ch, x, y, d, 0, spd, 0,
],
'warp': lambda ch, x, y: [
0x2f, ch, x, y,
],
'face': lambda ch, d, spd=1: [
0x25, ch, d, spd,
],
'fade': lambda is_out, frms=60: [
0x6, 0x31, frms, 0x0,
] if is_out else [
0x6, 0x13, frms, 0x64,
],
'load': lambda sc: [
# load scene
0x1c, sc, 0x0,
],
'done': lambda typ=2: [
# safe return
0x17, typ,
],
'setflag': lambda fidx, val=1: [
0x1a, fidx & 0xff, fidx >> 8, val,
],
'text_full': lambda tidx, prt, flg, sc=0, sub=0, chlp=0, chld=0: (
lambda rsub, rtidx: [
*([
# set sc_idx at 0x2002192 = 0x162 + 0x2002030
0x1b, 0x62, 0x1, sc,
] if sc > 0 else []),
*([
# set sub_idx at 0x2003c2a = 0x1bfa + 0x2002030
0x1b, 0xfa, 0x1b, rsub,
] if sub > 0 else []),
*([
# load char at chlp
0x1f, prt, chlp, chld, 0x20, 0x0,
] if prt > 1 and chlp > 0 else []),
*([
0xf, rtidx, prt, flg,
] if sub > 0 else [
0xf, tidx, prt, flg,
]),
*([
# unload char
0x24, prt,
0x22, prt,
] if prt > 1 and chlp > 0 else []),
#*([], prt == 0x17 and breakpoint())[0],
]
)(
tidx // 24 + 1 + 10 * sub, tidx % 24
),
}),
},
'text': {
's_text': {
'1/171': '对了,@[4D]还有一件事。@[40]@[42]',
'1/172': '从这里出去后,@[4D]所有流言都将被解锁。@[4F]@[42]读过第1条流言后,@[4D]新增的20个任务@[4D]会解锁前10个。@[4F]@[42]读过第2条后,@[4D]会解锁后10个。@[40]@[42]',
'1/173': '但是因为任务@[4D]最多只能有15个,@[4D]请存档后分别解锁。@[40]@[42]',
},
'fx_text': {
'1/47': '要偷看书的后面吗?@[4D]@[3210]是/@[ ]否@[ ]@[42]',
'1/48': '要继续吗?@[4D]@[3210]是/@[ ]否@[ ]@[42]',
},
},
'direct': {
'rumor_data': {
(0, 6): {
'flag1': 0x301,
'val1': 0,
'flag2': 0,
'val2': 0,
},
(6, 0x7f): {
'flag1': 0x301,
'val1': 1,
'flag2': 0,
'val2': 0,
},
},
'quest_data': {
(377, 397): {
'flag1': 0x301,
'val1': 1,
'flag2': 0x507,
'val2': 1,
'flag3': 0,
'val3': 0,
'nest': 0,
},
(387, 397): {
'flag2': 0x508,
'val2': 1,
},
381: {
'_uk3': 161,
},
(384, 386): {
'_uk3': 161,
},
387: {
'_uk3': 161,
},
(393, 395): {
'_uk2': 0,
},
},
},
},
}
def chk_has_japanese(txt, *_):
for c in txt:
oc = ord(c)
if (0x3040 < oc < 0x3094 or
0x30a0 < oc < 0x30fb):
return True
return False
def chk_invalid_words(txt, tname, *_):
if tname == 'words:rumor':
return txt.isdigit()
return False
CONF['text']['skipf'].extend([
chk_has_japanese,
chk_invalid_words,
])
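# Quick, self-contained checks for the two skip filters registered above (the
# sample strings are made up). chk_has_japanese flags hiragana/katakana code
# points; chk_invalid_words drops purely numeric entries from the rumor table.
def _demo_skip_filters():
    assert chk_has_japanese('ちから', 's_text') is True        # hiragana
    assert chk_has_japanese('カード', 's_text') is True        # katakana
    assert chk_has_japanese('MP +5', 's_text') is False        # ASCII only
    assert chk_invalid_words('123', 'words:rumor') is True     # numeric rumor
    assert chk_invalid_words('123', 'words:refer') is False    # other tables kept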
def mod_static_refer(bt, tt, tname, bidxp, tidxp, btxts, ttxts):
REF_TOP = 104
if not '@[51' in tt:
return tt
bwt = btxts['words:refer']
twt = ttxts['words:refer']
def _rplc(m):
refv = int(m.group(1), 16)
refi = (refv,)
if refv < REF_TOP:
sv = bwt[refi]
#if not sv.startswith('CRN_'):
return m.group(0)
assert refi in twt
return twt[refi]
return re.sub(r'\@\[51([0-9a-fA-F]{2})\]', _rplc, tt)
CONF['text']['modf'].extend([
mod_static_refer,
])
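# Toy illustration of mod_static_refer with made-up word tables: reference
# 0x70 (112 >= REF_TOP) is swapped for the translated word, while 0x10 (16)
# stays as a literal @[51..] control code. The unused bt/bidxp/tidxp arguments
# are passed as None purely for the sketch.
def _demo_mod_static_refer():
    btxts = {'words:refer': {(16,): 'GIL', (112,): 'CRN_SWORD'}}
    ttxts = {'words:refer': {(112,): '短剑'}}
    src = 'Obtained @[5170] and @[5110].'
    out = mod_static_refer(None, src, 's_text', None, None, btxts, ttxts)
    assert out == 'Obtained 短剑 and @[5110].'
    return out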
def codejumper(cd):
labs = {}
r = []
for c in cd:
if isinstance(c, str):
if c.endswith(':'):
labs[c[:-1]] = len(r)
else:
r.append(c)
r.append(None)
else:
r.append(c)
#dirty = False
for i in range(len(r)):
c = r[i]
if not isinstance(c, str):
continue
if not c in labs:
raise ValueError(f'unknown lable: {c}')
assert i < len(r) - 1 and r[i+1] is None
d = labs[c] - i - 2
if d < 0:
d += 0x10000
r[i] = (d & 0xff)
r[i+1] = (d >> 8)
#dirty = True
return r
CONF['sandbox']['script']['__mod_scene'] = codejumper
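# Worked example for codejumper: the bare string 'skip' is resolved into a
# two-byte little-endian offset to the 'skip:' label (relative to the byte
# after the two placeholder bytes). The opcode values are arbitrary.
def _demo_codejumper():
    cmds = [0x19, 'skip',      # jump command followed by a label reference
            0x27, 0x0F,        # bytes that the jump lands past
            'skip:',           # label: emits no bytes of its own
            0x17, 0x02]
    out = codejumper(cmds)
    assert out == [0x19, 0x02, 0x00, 0x27, 0x0F, 0x17, 0x02]
    return out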
def report(*args):
r = ' '.join(a for a in args if a)
print(r)
return r
INF = float('inf')
class c_tab_align_iter:
def __init__(self, *tabs, align_map = [], trim_page = []):
self.tabs = tabs
self.amap = self._hndl_amap(align_map)
self.trmpg = trim_page
def _hndl_amap(self, align_map):
add_lsts = []
for amap_itm in align_map:
mxidxp = None
cidxps = []
for i, idxp in enumerate(amap_itm):
while i >= len(add_lsts):
add_lsts.append([])
add_lst = add_lsts[i]
cidxp = idxp
for abas, adst in add_lst:
cidxp, _ = self._add_idx(cidxp, abas, adst)
cidxps.append(cidxp)
if mxidxp is None or self._cmp_idx(cidxp, mxidxp) > 0:
mxidxp = cidxp
for i, cidxp in enumerate(cidxps):
add_lst = add_lsts[i]
if self._cmp_idx(cidxp, mxidxp) == 0:
continue
add_lst.append((cidxp, mxidxp))
return add_lsts
def _iter_tab(self, idx):
tab = self.tabs[idx]
if tab:
yield from tab.items()
def reset(self):
self.stats = []
for i in range(len(self.tabs)):
itr = self._iter_tab(i)
zidx = tuple()
sinfo = [itr, zidx, (zidx, None)]
self._next_sinfo(i, sinfo)
self.stats.append(sinfo)
@staticmethod
def _getidxv(idxpath, i):
if i < len(idxpath):
return idxpath[i]
else:
return 0
def _cmp_idx(self, idxp1, idxp2):
for i in range(max(len(idxp1), len(idxp2))):
v1 = self._getidxv(idxp1, i)
v2 = self._getidxv(idxp2, i)
if v1 > v2:
return 1
elif v1 < v2:
return -1
return 0
def _trim_idx(self, idxp):
for i in range(len(idxp) - 1, -1, -1):
if idxp[i] != 0:
break
else:
return tuple()
return tuple(idxp[:i+1])
def _add_idx(self, src, abas, adst):
if self._cmp_idx(src, abas) < 0:
return src, False
r = []
do_add = True
for i in range(max(len(src), len(abas), len(adst))):
vs = self._getidxv(src, i)
vb = self._getidxv(abas, i)
vd = self._getidxv(adst, i)
vr = vs
if do_add:
vr += vd - vb
if vs != vb:
do_add = False
r.append(vr)
return self._trim_idx(r), True
def _calc_cidx(self, idxp, si):
if si >= len(self.amap):
return idxp
cidxp = idxp
for abas, adst in self.amap[si]:
cidxp, is_done = self._add_idx(cidxp, abas, adst)
if not is_done:
break
return cidxp
def _sublen_idx(self, dst, src):
sl = 0
for i in range(max(len(dst), len(src))):
vd = self._getidxv(dst, i)
vs = self._getidxv(src, i)
if vd != vs:
assert vd > vs
return sl
sl += 1
def _next_sinfo(self, si, sinfo):
itr, idxp, (vidxp, val) = sinfo
try:
nvidxp, nval = next(itr)
except StopIteration:
infi = (INF,)
sinfo[1] = infi
sinfo[2] = (infi, None)
return
sinfo[2] = (nvidxp, nval)
if si >= len(self.trmpg):
sinfo[1] = nvidxp
return
tpgs = self.trmpg[si]
cpg = None
for i in range(len(nvidxp), -1, -1):
pg = nvidxp[:i]
if pg in tpgs:
cpg = pg
break
if cpg is None:
sinfo[1] = nvidxp
return
sl = self._sublen_idx(nvidxp, vidxp)
if sl < len(cpg):
sinfo[1] = cpg
return
ridxp = []
for i in range(len(nvidxp)):
v = self._getidxv(idxp, i)
if i > sl:
v = 0
elif i == sl:
v += 1
ridxp.append(v)
sinfo[1] = tuple(ridxp)
def _next(self):
mnidxp = None
cidxps = []
for si, (itr, idxp, _) in enumerate(self.stats):
cidxp = self._calc_cidx(idxp, si)
cidxps.append(cidxp)
if mnidxp is None or self._cmp_idx(cidxp, mnidxp) < 0:
mnidxp = cidxp
if mnidxp and mnidxp[0] == INF:
return None, True
rs = []
for si, (sinfo, cidxp) in enumerate(zip(self.stats, cidxps)):
itr, idxp, (vidxp, val) = sinfo
if self._cmp_idx(cidxp, mnidxp) == 0:
rs.append((vidxp, val))
self._next_sinfo(si, sinfo)
else:
rs.append((vidxp, None))
return rs, False
def iter(self):
self.reset()
while True:
rs, is_done = self._next()
if is_done:
return
yield tuple(rs)
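# Small illustration of the index-path arithmetic used by c_tab_align_iter
# (no tables are needed, so an empty instance is enough). _cmp_idx compares
# index paths with implicit trailing zeros; _add_idx shifts a path that sits at
# or beyond the alignment base and leaves earlier paths untouched.
def _demo_idx_arith():
    it = c_tab_align_iter()
    assert it._cmp_idx((1, 2), (1,)) == 1               # (1, 2) sorts after (1,)
    assert it._cmp_idx((3,), (3, 0)) == 0               # trailing zeros ignored
    assert it._add_idx((5, 3), (5,), (7,)) == ((7, 3), True)
    assert it._add_idx((4, 9), (5,), (7,)) == ((4, 9), False)
    return it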
class c_ffta_modifier:
def __init__(self, conf):
self.conf = conf
def load(self):
self.srom = {}
self.chst = {}
for nm, rconf in self.conf['roms']['src'].items():
rom, chst = self.load_rom(rconf)
self.srom[nm] = rom
self.chst[nm] = chst
self.fntgen, self.chst['font'] = self.load_font()
self.txts = self.load_texts()
def export(self):
rmk = None
sben = self.conf.get('sandbox', {}).get('enable', False)
sbon = self.conf.get('sandbox', {}).get('only', False)
if not sben or not sbon:
rmk = self.export_rom(self.conf['roms']['dst']['rels'])
if sben:
sbrmk = self.export_rom(self.conf['roms']['dst']['sndbx'], as_sndbx = True)
if rmk is None:
rmk = sbrmk
return rmk
def export_rom(self, rom_conf, *args, **kargs):
rmk = self.repack(*args, **kargs)
if not rmk:
report('warning', f'something wrong while repacking')
return
self.save_rom(rom_conf['path'], rmk)
return rmk
def load_rom(self, rom_conf):
lfunc = load_rom[rom_conf['type']]
rom = lfunc(rom_conf['path'])
if 'charset' in rom_conf:
if 'charset_ocr' in rom_conf and rom_conf['charset_ocr']:
chstrom = rom
else:
chstrom = None
chst = c_ffta_charset_ocr(rom_conf['charset'], chstrom)
chst.load()
else:
chst = None
return rom, chst
def load_font(self):
conf = self.conf['font'] | chst = c_ffta_charset_dynamic( | 2 | 2023-11-12 18:43:53+00:00 | 12k |
bytedance/LapNet | lapnet/networks/orig_ferminet.py | [
{
"identifier": "envelopes",
"path": "lapnet/envelopes.py",
"snippet": "_MAX_POLY_ORDER = 5 # highest polynomial used in envelopes\n PRE_ORBITAL = enum.auto()\n PRE_DETERMINANT = enum.auto()\n POST_DETERMINANT = enum.auto()\n ISOTROPIC = enum.auto()\n ABS_ISOTROPIC = enum.auto()\n DIAGONAL = enum.auto()\n FULL = enum.auto()\n NULL = enum.auto()\n STO = enum.auto()\n STO_POLY = enum.auto()\n OUTPUT = enum.auto()\n EXACT_CUSP = enum.auto()\nclass EnvelopeType(enum.Enum):\nclass EnvelopeLabel(enum.Enum):\nclass EnvelopeInit(Protocol):\nclass EnvelopeApply(Protocol):\nclass Envelope:\n def __call__(\n self,\n natom: int,\n output_dims: Union[int, Sequence[int]],\n hf: Optional[scf.Scf],\n ndim: int) -> Union[Mapping[str, Any], Sequence[Mapping[str, Any]]]:\n def __call__(self, *, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n **kwargs: jnp.ndarray) -> jnp.ndarray:\ndef _apply_covariance(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:\ndef make_isotropic_envelope(is_abs=False) -> Envelope:\n def init(natom: int, output_dims: Sequence[int], hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Sequence[Mapping[str, jnp.ndarray]]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef make_diagonal_envelope() -> Envelope:\n def init(natom: int, output_dims: Sequence[int], hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Sequence[Mapping[str, jnp.ndarray]]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef make_full_envelope() -> Envelope:\n def init(natom: int, output_dims: Sequence[int], hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Sequence[Mapping[str, jnp.ndarray]]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef make_null_envelope() -> Envelope:\n def init(natom: int, output_dims: Sequence[int], hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Sequence[Mapping[str, jnp.ndarray]]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray,\n r_ee: jnp.ndarray) -> jnp.ndarray:\ndef make_sto_envelope() -> Envelope:\n def init(natom: int, output_dims: int, hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Mapping[str, jnp.ndarray]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray, n: jnp.ndarray) -> jnp.ndarray:\ndef make_sto_poly_envelope() -> Envelope:\n def init(natom: int, output_dims: int, hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Mapping[str, jnp.ndarray]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef make_output_envelope() -> Envelope:\n def init(natom: int, output_dims: int, hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Mapping[str, jnp.ndarray]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef make_exact_cusp_envelope(nspins: Tuple[int, int],\n charges: jnp.ndarray) -> Envelope:\n def init(natom: int, output_dims: int, hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Mapping[str, jnp.ndarray]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef get_envelope(\n envelope_label: EnvelopeLabel,\n **kwargs: Any,\n) -> Envelope:"
},
{
"identifier": "sto",
"path": "lapnet/sto.py",
"snippet": "STO_6G_COEFFS = {\n 'Al': {\n '1s': (12.557246008517494, 25.106698511179705),\n '2s': (4.359619258385573, 12.927028645707773),\n '3s': (1.6997831803065009, 0.7615604776424957),\n '2p': (6.554000435218195, 94.73883433477683),\n '3p': (2.2351712617093247, 1.3827832310783092),\n },\n 'Ar': {\n '1s': (17.39539980503338, 40.93071746434361),\n '2s': (6.7395274109023875, 38.411458472102076),\n '3s': (2.329717983859512, 2.295580734042915),\n '2p': (10.084117748829376, 424.5775400483707),\n '3p': (3.060265563420203, 5.664664351842427),\n },\n 'B': {\n '1s': (4.67868616774112, 5.70938862708008),\n '2s': (1.4999288235962795, 0.8976437475218287),\n '2p': (2.2716468438938575, 2.3554214877163155),\n },\n 'Be': {\n '1s': (3.6790735046348035, 3.9812194646017587),\n '2s': (1.1499342729534248, 0.4619650556409617),\n '2p': (1.737594734756709, 0.9201071798798099),\n },\n 'C': {\n '1s': (5.668891371758942, 7.615256589029782),\n '2s': (1.719877299055101, 1.2637414721616658),\n '2p': (2.603224975467389, 3.791736047605733),\n },\n 'Ca': {\n '1s': (19.5709944613605, 48.83583953845113),\n '2s': (7.738732968586929, 54.260484887280676),\n '3s': (3.0096391625807035, 5.625243917454751),\n '4s': (1.3601380588639749, 0.12698321714720479),\n '2p': (11.44277220950772, 656.1541713931504),\n '3p': (3.951008639686456, 17.85199403770938),\n '4p': (1.6981898284489827, 0.14655638210969082),\n },\n 'Cl': {\n '1s': (16.42714984359141, 37.564449406190995),\n '2s': (6.258720160253116, 31.915499495709224),\n '3s': (2.0997377724242736, 1.5955272516248895),\n '2p': (9.379020216891835, 331.94837422065166),\n '3p': (2.7582728076424314, 3.550609030735584),\n },\n 'F': {\n '1s': (8.647725610389443, 14.3478532498991),\n '2s': (2.5498686974182796, 3.3822880320590616),\n '2p': (3.8381983263962307, 14.689479350772942),\n },\n 'H': {\n '1s': (1.2396569932253754, 0.7786482598438814),\n },\n 'He': {\n '1s': (1.6895399166477436, 1.2389169650053704),\n },\n 'K': {\n '1s': (18.603597538983905, 45.2640252413769),\n '2s': (7.259222119835751, 46.24588987232522),\n '3s': (2.7496572800200547, 4.100159535853956),\n '4s': (1.4301411982280272, 0.1591563672673338),\n '2p': (10.773926382192002, 534.0140415145601),\n '3p': (3.62088857101528, 12.145661267587803),\n '4p': (1.7834260775485395, 0.19104940003516785),\n },\n 'Li': {\n '1s': (2.689237663677221, 2.487921264218123),\n '2s': (0.7999592565921534, 0.18646529682488083),\n '2p': (1.2131148771860853, 0.2626425031316319),\n },\n 'Mg': {\n '1s': (10.606505744603167, 19.48727485747987),\n '2s': (3.4797896476024155, 7.358433661799946),\n '3s': (1.7497617130189205, 0.8428386648793419),\n '2p': (5.214100079417301, 42.5456609714464),\n '3p': (2.304293894314714, 1.5934349371819814),\n },\n 'N': {\n '1s': (6.668046567851146, 9.714119765863773),\n '2s': (1.9498640428999283, 1.7295555428587879),\n '2p': (2.953104555611183, 5.903584171692387),\n },\n 'Na': {\n '1s': (10.606505744603167, 19.48727485747987),\n '2s': (3.4797896476024155, 7.358433661799946),\n '3s': (1.7497617130189205, 0.8428386648793419),\n '2p': (5.214100079417301, 42.5456609714464),\n '3p': (2.304293894314714, 1.5934349371819814),\n },\n 'Ne': {\n '1s': (9.638169483462994, 16.882135191702933),\n '2s': (2.879839514936209, 4.5849782106230474),\n '2p': (4.313575648976462, 21.89010672369113),\n },\n 'O': {\n '1s': (7.658312143014643, 11.957311431836787),\n '2s': (2.2498858861805417, 2.4735485371920998),\n '2p': (3.382554479984454, 9.404304291880152),\n },\n 'P': {\n '1s': (14.49840434649223, 31.149470127449554),\n '2s': (5.3092768063608204, 
21.157258029235972),\n '3s': (1.8997434641776487, 1.1239662327153015),\n '2p': (7.868086018599397, 177.5301649474899),\n '3p': (2.504478963796671, 2.3183467992744076),\n },\n 'S': {\n '1s': (15.468177117665267, 34.325592817926776),\n '2s': (5.788995339687754, 26.26240044766998),\n '3s': (2.0497271436527353, 1.4664153516476472),\n '2p': (8.713206244747633, 257.8924790569185),\n '3p': (2.69317030051437, 3.1966886670849703),\n },\n 'Si': {\n '1s': (13.528008654654725, 28.074801445575115),\n '2s': (4.8295674907671104, 16.69869883644322),\n '3s': (1.7497617129910332, 0.8428386647959631),\n '2p': (7.211841900405304, 131.56735728268595),\n '3p': (2.304293894345575, 1.5934349373181378),\n }\n}"
},
{
"identifier": "scf",
"path": "lapnet/utils/scf.py",
"snippet": "def stable_opt_internal(mf):\n def __init__(self,\n molecule: Optional[Sequence[system.Atom]] = None,\n nelectrons: Optional[Tuple[int, int]] = None,\n basis: Optional[str] = 'cc-pVTZ',\n pyscf_mol: Optional[pyscf.gto.Mole] = None,\n restricted: bool = True):\n def run(self, dm0: Optional[np.ndarray] = None):\n def eval_mos(self, positions: np.ndarray,\n deriv: bool = False) -> Tuple[np.ndarray, np.ndarray]:\nclass Scf:"
},
{
"identifier": "construct_input_features",
"path": "lapnet/networks/utils.py",
"snippet": "def construct_input_features(\n pos: jnp.ndarray,\n atoms: jnp.ndarray,\n ndim: int = 3) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Constructs inputs to Fermi Net from raw electron and atomic positions.\n\n Args:\n pos: electron positions. Shape (nelectrons*ndim,).\n atoms: atom positions. Shape (natoms, ndim).\n ndim: dimension of system. Change only with caution.\n\n Returns:\n ae, ee, r_ae, r_ee tuple, where:\n ae: atom-electron vector. Shape (nelectron, natom, ndim).\n ee: atom-electron vector. Shape (nelectron, nelectron, ndim).\n r_ae: atom-electron distance. Shape (nelectron, natom, 1).\n r_ee: electron-electron distance. Shape (nelectron, nelectron, 1).\n The diagonal terms in r_ee are masked out such that the gradients of these\n terms are also zero.\n \"\"\"\n assert atoms.shape[1] == ndim\n ae = jnp.reshape(pos, [-1, 1, ndim]) - atoms[None, ...]\n ee = jnp.reshape(pos, [1, -1, ndim]) - jnp.reshape(pos, [-1, 1, ndim])\n\n r_ae = jnp.linalg.norm(ae, axis=2, keepdims=True)\n # Avoid computing the norm of zero, as is has undefined grad\n n = ee.shape[0]\n r_ee = (\n jnp.linalg.norm(ee + jnp.eye(n)[..., None], axis=-1) * (1.0 - jnp.eye(n)))\n\n return ae, ee, r_ae, r_ee[..., None]"
},
{
"identifier": "network_blocks",
"path": "lapnet/networks/network_blocks.py",
"snippet": "def array_partitions(sizes: Sequence[int]) -> Sequence[int]:\ndef init_linear_layer(key: chex.PRNGKey,\n in_dim: int,\n out_dim: int,\n include_bias: bool = True) -> Mapping[str, jnp.ndarray]:\ndef linear_layer(x: jnp.ndarray,\n w: jnp.ndarray,\n b: Optional[jnp.ndarray] = None) -> jnp.ndarray:\ndef slogdet(x):\ndef individual_slogdet(xs: Sequence[jnp.ndarray],\n w: Optional[jnp.ndarray] = None):\ndef logdet_matmul(xs: Sequence[jnp.ndarray],\n w: Optional[jnp.ndarray] = None) -> jnp.ndarray:"
}
] | import functools
import attr
import chex
import jax
import lapjax.numpy as jnp
from typing import Any, Iterable, Mapping, Optional, Sequence, Tuple, Union
from lapnet import envelopes
from lapnet import sto
from lapnet.utils import scf
from .protocol import *
from .utils import construct_input_features
from lapnet.networks import network_blocks | 7,429 | output_dims = dims_orbital_in
elif options.envelope.apply_type == envelopes.EnvelopeType.PRE_DETERMINANT:
# Applied to orbitals.
output_dims = nspin_orbitals
elif options.envelope.apply_type == envelopes.EnvelopeType.POST_DETERMINANT:
# Applied to all determinants.
output_dims = 1
else:
raise ValueError('Unknown envelope type')
params['envelope'] = options.envelope.init(
natom=natom, output_dims=output_dims, hf=hf_solution, ndim=ndim)
# orbital shaping
key, subkey = jax.random.split(key, num=2)
params['orbital'] = init_orbital_shaping(
key=subkey,
input_dim=dims_orbital_in,
nspin_orbitals=nspin_orbitals,
bias_orbitals=options.bias_orbitals)
if hf_solution is not None:
params['single'], params['orbital'] = init_to_hf_solution(
hf_solution=hf_solution,
single_layers=params['single'],
orbital_layer=params['orbital'],
determinants=options.determinants,
active_spin_channels=active_spin_channels,
eps=eps)
return params
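# Hedged sketch of the envelope sizing logic above, using the isotropic
# envelope from lapnet.envelopes (expected to be a PRE_DETERMINANT envelope,
# i.e. one output per orbital). The atom and orbital counts are made up, and
# the POST_DETERMINANT fallback is a simplification for the sketch.
def _demo_envelope_init():
    envelope = envelopes.get_envelope(envelopes.EnvelopeLabel.ISOTROPIC)
    natom = 2
    nspin_orbitals = [4 * 16, 3 * 16]   # orbitals per spin channel x determinants
    if envelope.apply_type == envelopes.EnvelopeType.PRE_DETERMINANT:
        output_dims = nspin_orbitals    # one envelope parameter set per orbital
    else:
        output_dims = 1                 # e.g. a POST_DETERMINANT envelope
    return envelope.init(natom=natom, output_dims=output_dims, hf=None, ndim=3)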
## Network layers ##
def make_ferminet_features(charges: Optional[jnp.ndarray] = None,
nspins: Optional[Tuple[int, ...]] = None,
ndim: int = 3) -> FeatureLayer:
"""Returns the init and apply functions for the standard features."""
del charges, nspins
def init() -> Tuple[Tuple[int, int], Param]:
return (ndim + 1, ndim + 1), {}
def apply(ae, r_ae, ee, r_ee) -> Tuple[jnp.ndarray, jnp.ndarray]:
ae_features = jnp.concatenate((r_ae, ae), axis=2)
ae_features = jnp.reshape(ae_features, [jnp.shape(ae_features)[0], -1])
ee_features = jnp.concatenate((r_ee, ee), axis=2)
return ae_features, ee_features
return FeatureLayer(init=init, apply=apply)
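# Shape sketch for the standard feature layer: with ndim=3 the one-electron
# features are (r_ae, ae), i.e. 4 numbers per atom flattened over atoms, and
# the two-electron features are (r_ee, ee), 4 numbers per electron pair. The
# positions and atoms below are arbitrary.
def _demo_feature_shapes():
    natom, nelec = 2, 3
    atoms = jnp.zeros((natom, 3))
    pos = jnp.arange(nelec * 3, dtype=jnp.float32)
    ae, ee, r_ae, r_ee = construct_input_features(pos, atoms)
    feature_layer = make_ferminet_features(ndim=3)
    (dims_ae, dims_ee), _ = feature_layer.init()
    ae_features, ee_features = feature_layer.apply(ae=ae, r_ae=r_ae, ee=ee, r_ee=r_ee)
    assert ae_features.shape == (nelec, natom * dims_ae)   # (3, 8)
    assert ee_features.shape == (nelec, nelec, dims_ee)    # (3, 3, 4)
    return ae_features.shape, ee_features.shape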
def construct_symmetric_features(h_one: jnp.ndarray, h_two: jnp.ndarray,
nspins: Tuple[int, int]) -> jnp.ndarray:
"""Combines intermediate features from rank-one and -two streams.
Args:
h_one: set of one-electron features. Shape: (nelectrons, n1), where n1 is
the output size of the previous layer.
h_two: set of two-electron features. Shape: (nelectrons, nelectrons, n2),
where n2 is the output size of the previous layer.
nspins: Number of spin-up and spin-down electrons.
Returns:
array containing the permutation-equivariant features: the input set of
one-electron features, the mean of the one-electron features over each
(occupied) spin channel, and the mean of the two-electron features over each
(occupied) spin channel. Output shape (nelectrons, 3*n1 + 2*n2) if there are
both spin-up and spin-down electrons and (nelectrons, 2*n1 + n2) otherwise.
"""
# Split features into spin up and spin down electrons
spin_partitions = network_blocks.array_partitions(nspins)
h_ones = jnp.split(h_one, spin_partitions, axis=0)
h_twos = jnp.split(h_two, spin_partitions, axis=0)
# Construct inputs to next layer
# h.size == 0 corresponds to unoccupied spin channels.
g_one = [jnp.mean(h, axis=0, keepdims=True) for h in h_ones if h.size > 0]
g_two = [jnp.mean(h, axis=0) for h in h_twos if h.size > 0]
g_one = [jnp.tile(g, [h_one.shape[0], 1]) for g in g_one]
return jnp.concatenate([h_one] + g_one + g_two, axis=1)
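# Shape check for construct_symmetric_features: with n1=8 one-electron and
# n2=6 two-electron features and both spin channels occupied, the output is
# (nelectrons, 3*n1 + 2*n2). Zero inputs are enough to illustrate the shapes.
def _demo_symmetric_feature_shapes():
    nspins = (2, 2)
    nelec = sum(nspins)
    h_one = jnp.zeros((nelec, 8))
    h_two = jnp.zeros((nelec, nelec, 6))
    out = construct_symmetric_features(h_one, h_two, nspins)
    assert out.shape == (nelec, 3 * 8 + 2 * 6)   # (4, 36)
    return out.shape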
def fermi_net_orbitals(
params,
pos: jnp.ndarray,
atoms: jnp.ndarray,
nspins: Tuple[int, ...],
options: FermiNetOptions = FermiNetOptions(),
):
"""Forward evaluation of the Fermionic Neural Network up to the orbitals.
Args:
params: A dictionary of parameters, containing fields:
`atoms`: atomic positions, used to construct input features.
`single`: a list of dictionaries with params 'w' and 'b', weights for the
one-electron stream of the network.
`double`: a list of dictionaries with params 'w' and 'b', weights for the
two-electron stream of the network.
`orbital`: a list of two weight matrices, for spin up and spin down (no
bias is necessary as it only adds a constant to each row, which does not
change the determinant).
`dets`: weight on the linear combination of determinants
`envelope`: a dictionary with fields `sigma` and `pi`, weights for the
multiplicative envelope.
pos: The electron positions, a 3N dimensional vector.
atoms: Array with positions of atoms.
nspins: Tuple with number of spin up and spin down electrons.
options: Network configuration.
Returns:
One matrix (two matrices if options.full_det is False) that exchange columns
under the exchange of inputs of shape (ndet, nalpha+nbeta, nalpha+nbeta) (or
(ndet, nalpha, nalpha) and (ndet, nbeta, nbeta)) and a tuple of (ae, r_ae,
r_ee), the atom-electron vectors, distances and electron-electron distances.
"""
| # Copyright 2020 DeepMind Technologies Limited.
# Copyright 2023 Bytedance Ltd. and/or its affiliate
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Fermionic Neural Network in JAX."""
# import jax.numpy as jnp
## Network settings ##
@attr.s(auto_attribs=True, kw_only=True)
class FermiNetOptions:
"""Options controlling the FermiNet architecture.
Attributes:
ndim: dimension of system. Change only with caution.
hidden_dims: Tuple of pairs, where each pair contains the number of hidden
units in the one-electron and two-electron stream in the corresponding
layer of the FermiNet. The number of layers is given by the length of the
tuple.
use_last_layer: If true, the outputs of the one- and two-electron streams
are combined into permutation-equivariant features and passed into the
final orbital-shaping layer. Otherwise, just the output of the
one-electron stream is passed into the orbital-shaping layer.
determinants: Number of determinants to use.
full_det: If true, evaluate determinants over all electrons. Otherwise,
block-diagonalise determinants into spin channels.
bias_orbitals: If true, include a bias in the final linear layer to shape
the outputs into orbitals.
envelope_label: Envelope to use to impose orbitals go to zero at infinity.
See envelopes module.
envelope: Envelope object to create and apply the multiplicative envelope.
feature_layer: Feature object to create and apply the input features for the
one- and two-electron layers.
"""
ndim: int = 3
hidden_dims: LayerArgs = ((256, 32), (256, 32), (256, 32), (256, 32))
use_last_layer: bool = False
determinants: int = 16
full_det: bool = True
bias_orbitals: bool = False
envelope_label: envelopes.EnvelopeLabel = envelopes.EnvelopeLabel.ABS_ISOTROPIC
envelope: envelopes.Envelope = attr.ib(
default=attr.Factory(
lambda self: envelopes.get_envelope(self.envelope_label),
takes_self=True))
feature_layer: FeatureLayer = attr.ib(
default=attr.Factory(
lambda self: make_ferminet_features(ndim=self.ndim), takes_self=True))
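# --- Illustrative sketch (editor addition; not part of the original source). ---
# FermiNetOptions is a plain attrs container with a default for every field, so a
# smaller configuration for quick experiments could be built as in the hypothetical
# helper below (defined for illustration only and never called by the library):
def _example_small_options() -> FermiNetOptions:
  return FermiNetOptions(
      hidden_dims=((64, 16), (64, 16)),  # two layers instead of the default four
      determinants=4,
      full_det=False,  # block-diagonal spin determinants instead of a dense one
  )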
## Network initialisation ##
def init_layers(
key: chex.PRNGKey, dims_one_in: Sequence[int], dims_one_out: Sequence[int],
dims_two_in: Sequence[int],
dims_two_out: Sequence[int]) -> Tuple[Sequence[Param], Sequence[Param]]:
"""Initialises parameters for the FermiNet layers.
The final two-electron layer is not strictly necessary (i.e.
FermiNetOptions.use_last_layer is False), in which case the two-electron
  stream contains one fewer layer than the one-electron stream.
Args:
key: JAX RNG state.
dims_one_in: dimension of inputs to each one-electron layer.
dims_one_out: dimension of outputs (number of hidden units) in each
one-electron layer.
dims_two_in: dimension of inputs to each two-electron layer.
dims_two_out: dimension of outputs (number of hidden units) in each
two-electron layer.
Returns:
Pair of sequences (length: number of layers) of parameters for one- and
    two-electron streams.
Raises:
ValueError: if dims_one_in and dims_one_out are different lengths, or
similarly for dims_two_in and dims_two_out, or if the number of one-electron
    layers is not equal to or one more than the number of two-electron layers.
"""
if len(dims_one_in) != len(dims_one_out):
raise ValueError(
'Length of one-electron stream inputs and outputs not identical.')
if len(dims_two_in) != len(dims_two_out):
raise ValueError(
'Length of two-electron stream inputs and outputs not identical.')
if len(dims_two_in) not in (len(dims_one_out), len(dims_one_out) - 1):
raise ValueError('Number of layers in two electron stream must match or be '
'one fewer than the number of layers in the one-electron '
'stream')
single = []
double = []
ndouble_layers = len(dims_two_in)
for i in range(len(dims_one_in)):
key, subkey = jax.random.split(key)
single.append(
network_blocks.init_linear_layer(
subkey,
in_dim=dims_one_in[i],
out_dim=dims_one_out[i],
include_bias=True))
if i < ndouble_layers:
key, subkey = jax.random.split(key)
double.append(
network_blocks.init_linear_layer(
subkey,
in_dim=dims_two_in[i],
out_dim=dims_two_out[i],
include_bias=True))
return single, double
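# --- Illustrative sketch (editor addition; hypothetical helper, illustration only). ---
# init_layers simply maps each (input, output) pair to a linear layer; e.g. a
# two-layer one-electron stream paired with a single two-electron layer:
def _example_init_layers():
  key = jax.random.PRNGKey(0)
  single, double = init_layers(
      key,
      dims_one_in=[12, 24],
      dims_one_out=[8, 8],
      dims_two_in=[4],
      dims_two_out=[4])
  return len(single), len(double)  # (2, 1)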
def init_orbital_shaping(
key: chex.PRNGKey,
input_dim: int,
nspin_orbitals: Sequence[int],
bias_orbitals: bool,
) -> Sequence[Param]:
"""Initialises orbital shaping layer.
Args:
key: JAX RNG state.
input_dim: dimension of input activations to the orbital shaping layer.
nspin_orbitals: total number of orbitals in each spin-channel.
bias_orbitals: whether to include a bias in the layer.
Returns:
Parameters of length len(nspin_orbitals) for the orbital shaping for each
spin channel.
"""
orbitals = []
for nspin_orbital in nspin_orbitals:
key, subkey = jax.random.split(key)
orbitals.append(
network_blocks.init_linear_layer(
subkey,
in_dim=input_dim,
out_dim=nspin_orbital,
include_bias=bias_orbitals))
return orbitals
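# --- Illustrative sketch (editor addition; hypothetical helper, illustration only). ---
# One linear layer is created per spin channel. For example, 16 determinants with
# nspins=(4, 2) and full_det=True need 6 * 16 = 96 orbitals in each channel:
def _example_orbital_shaping():
  key = jax.random.PRNGKey(1)
  return init_orbital_shaping(
      key, input_dim=256, nspin_orbitals=[96, 96], bias_orbitals=False)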
def init_to_hf_solution(
hf_solution: scf.Scf,
single_layers: Sequence[Param],
orbital_layer: Sequence[Param],
determinants: int,
active_spin_channels: Sequence[int],
eps: float = 0.01) -> Tuple[Sequence[Param], Sequence[Param]]:
"""Sets initial parameters to match Hartree-Fock.
NOTE: this does not handle the envelope parameters, which are done in the
appropriate envelope initialisation functions. Not all envelopes support HF
initialisation.
Args:
hf_solution: Hartree-Fock state to match.
single_layers: parameters (weights and biases) for the one-electron stream,
with length: number of layers in the one-electron stream.
orbital_layer: parameters for the orbital-shaping layer, length: number of
spin-channels in the system.
determinants: Number of determinants used in the final wavefunction.
active_spin_channels: Number of particles in each spin channel containing at
least one particle.
eps: scaling factor for all weights and biases such that they are
initialised close to zero unless otherwise required to match Hartree-Fock.
Returns:
Tuple of parameters for the one-electron stream and the orbital shaping
layer respectively.
"""
# Scale all params in one-electron stream to be near zero.
single_layers = jax.tree_map(lambda param: param * eps, single_layers)
# Initialize first layer of Fermi Net to match s- or p-type orbitals.
# The sto and sto-poly envelopes can exactly capture the s-type orbital,
# so the effect of the neural network part is constant, while the p-type
# orbital also has a term multiplied by x, y or z.
j = 0
for ia, atom in enumerate(hf_solution.molecule):
coeffs = sto.STO_6G_COEFFS[atom.symbol]
for orb in coeffs.keys():
if orb[1] == 's':
single_layers[0]['b'] = single_layers[0]['b'].at[j].set(1.0)
j += 1
elif orb[1] == 'p':
w = single_layers[0]['w']
w = w.at[ia * 4 + 1:(ia + 1) * 4, j:j + 3].set(jnp.eye(3))
single_layers[0]['w'] = w
j += 3
else:
raise NotImplementedError('HF Initialization not implemented for '
'%s orbitals' % orb[1])
# Scale all params in orbital shaping to be near zero.
orbital_layer = jax.tree_map(lambda param: param * eps, orbital_layer)
for i, spin in enumerate(active_spin_channels):
# Initialize last layer to match Hartree-Fock weights on basis set.
norb = hf_solution.mean_field.mo_coeff[i].shape[0]
mat = hf_solution.mean_field.mo_coeff[i][:, :spin]
w = orbital_layer[i]['w']
for j in range(determinants):
w = w.at[:norb, j * spin:(j + 1) * spin].set(mat)
orbital_layer[i]['w'] = w
return single_layers, orbital_layer
def init_fermi_net_params(
key: chex.PRNGKey,
atoms: jnp.ndarray,
nspins: Tuple[int, ...],
options: FermiNetOptions,
hf_solution: Optional[scf.Scf] = None,
eps: float = 0.01,
) -> ParamTree:
"""Initializes parameters for the Fermionic Neural Network.
Args:
key: JAX RNG state.
atoms: (natom, ndim) array of atom positions.
nspins: A tuple with either the number of spin-up and spin-down electrons,
or the total number of electrons. If the latter, the spins are instead
given as an input to the network.
options: network options.
hf_solution: If present, initialise the parameters to match the Hartree-Fock
      solution. Otherwise a random initialisation is used.
eps: If hf_solution is present, scale all weights and biases except the
first layer by this factor such that they are initialised close to zero.
Returns:
PyTree of network parameters. Spin-dependent parameters are only created for
spin channels containing at least one particle.
"""
if options.envelope_label in (envelopes.EnvelopeLabel.STO,
envelopes.EnvelopeLabel.STO_POLY):
if options.bias_orbitals:
raise ValueError('Cannot bias orbitals w/STO envelope.')
if hf_solution is not None:
if options.use_last_layer:
raise ValueError('Cannot use last layer w/HF init')
if options.envelope.apply_type not in ('sto', 'sto-poly'):
raise ValueError('When using HF init, '
'envelope_type must be `sto` or `sto-poly`.')
active_spin_channels = [spin for spin in nspins if spin > 0]
nchannels = len(active_spin_channels)
if nchannels == 0:
raise ValueError('No electrons present!')
params = {}
(num_one_features, num_two_features), params['input'] = (
options.feature_layer.init())
# The input to layer L of the one-electron stream is from
# construct_symmetric_features and shape (nelectrons, nfeatures), where
# nfeatures is i) output from the previous one-electron layer; ii) the mean
# for each spin channel from each layer; iii) the mean for each spin channel
# from each two-electron layer. We don't create features for spin channels
# which contain no electrons (i.e. spin-polarised systems).
nfeatures = lambda out1, out2: (nchannels + 1) * out1 + nchannels * out2
natom, ndim = atoms.shape
# one-electron stream, per electron:
# - one-electron features per atom (default: electron-atom vectors
# (ndim/atom) and distances (1/atom)),
# two-electron stream, per pair of electrons:
# - two-electron features per electron pair (default: electron-electron
  # vector (ndim) and distance (1))
feature_one_dims = natom * num_one_features
feature_two_dims = num_two_features
dims_one_in = (
[nfeatures(feature_one_dims, feature_two_dims)] +
[nfeatures(hdim[0], hdim[1]) for hdim in options.hidden_dims[:-1]])
dims_one_out = [hdim[0] for hdim in options.hidden_dims]
if options.use_last_layer:
dims_two_in = ([feature_two_dims] +
[hdim[1] for hdim in options.hidden_dims[:-1]])
dims_two_out = [hdim[1] for hdim in options.hidden_dims]
else:
dims_two_in = ([feature_two_dims] +
[hdim[1] for hdim in options.hidden_dims[:-2]])
dims_two_out = [hdim[1] for hdim in options.hidden_dims[:-1]]
if not options.use_last_layer:
# Just pass the activations from the final layer of the one-electron stream
# directly to orbital shaping.
dims_orbital_in = options.hidden_dims[-1][0]
else:
dims_orbital_in = nfeatures(options.hidden_dims[-1][0],
options.hidden_dims[-1][1])
# How many spin-orbitals do we need to create per spin channel?
nspin_orbitals = []
for nspin in active_spin_channels:
if options.full_det:
# Dense determinant. Need N orbitals per electron per determinant.
norbitals = sum(nspins) * options.determinants
else:
# Spin-factored block-diagonal determinant. Need nspin orbitals per
# electron per determinant.
norbitals = nspin * options.determinants
nspin_orbitals.append(norbitals)
# Layer initialisation
key, subkey = jax.random.split(key, num=2)
params['single'], params['double'] = init_layers(
key=subkey,
dims_one_in=dims_one_in,
dims_one_out=dims_one_out,
dims_two_in=dims_two_in,
dims_two_out=dims_two_out)
# create envelope params
if options.envelope.apply_type == envelopes.EnvelopeType.PRE_ORBITAL:
# Applied to output from final layer of 1e stream.
output_dims = dims_orbital_in
elif options.envelope.apply_type == envelopes.EnvelopeType.PRE_DETERMINANT:
# Applied to orbitals.
output_dims = nspin_orbitals
elif options.envelope.apply_type == envelopes.EnvelopeType.POST_DETERMINANT:
# Applied to all determinants.
output_dims = 1
else:
raise ValueError('Unknown envelope type')
params['envelope'] = options.envelope.init(
natom=natom, output_dims=output_dims, hf=hf_solution, ndim=ndim)
# orbital shaping
key, subkey = jax.random.split(key, num=2)
params['orbital'] = init_orbital_shaping(
key=subkey,
input_dim=dims_orbital_in,
nspin_orbitals=nspin_orbitals,
bias_orbitals=options.bias_orbitals)
if hf_solution is not None:
params['single'], params['orbital'] = init_to_hf_solution(
hf_solution=hf_solution,
single_layers=params['single'],
orbital_layer=params['orbital'],
determinants=options.determinants,
active_spin_channels=active_spin_channels,
eps=eps)
return params
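# --- Illustrative sketch (editor addition; hypothetical helper, never called). ---
# End-to-end random initialisation for a toy system (a single atom at the origin
# with 2 spin-up and 2 spin-down electrons) using the default options:
def _example_init_params() -> ParamTree:
  key = jax.random.PRNGKey(0)
  atoms = jnp.zeros((1, 3))  # (natom, ndim) with the default ndim = 3
  return init_fermi_net_params(
      key, atoms=atoms, nspins=(2, 2), options=FermiNetOptions())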
## Network layers ##
def make_ferminet_features(charges: Optional[jnp.ndarray] = None,
nspins: Optional[Tuple[int, ...]] = None,
ndim: int = 3) -> FeatureLayer:
"""Returns the init and apply functions for the standard features."""
del charges, nspins
def init() -> Tuple[Tuple[int, int], Param]:
return (ndim + 1, ndim + 1), {}
def apply(ae, r_ae, ee, r_ee) -> Tuple[jnp.ndarray, jnp.ndarray]:
ae_features = jnp.concatenate((r_ae, ae), axis=2)
ae_features = jnp.reshape(ae_features, [jnp.shape(ae_features)[0], -1])
ee_features = jnp.concatenate((r_ee, ee), axis=2)
return ae_features, ee_features
return FeatureLayer(init=init, apply=apply)
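# --- Illustrative sketch (editor addition; hypothetical helper, illustration only). ---
# With the default ndim=3, each electron-atom pair contributes ndim + 1 = 4 numbers
# (the displacement vector plus its norm), and likewise for each electron pair:
def _example_feature_dims():
  feature_layer = make_ferminet_features(ndim=3)
  (num_one, num_two), _ = feature_layer.init()
  return num_one, num_two  # (4, 4)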
def construct_symmetric_features(h_one: jnp.ndarray, h_two: jnp.ndarray,
nspins: Tuple[int, int]) -> jnp.ndarray:
"""Combines intermediate features from rank-one and -two streams.
Args:
h_one: set of one-electron features. Shape: (nelectrons, n1), where n1 is
the output size of the previous layer.
h_two: set of two-electron features. Shape: (nelectrons, nelectrons, n2),
where n2 is the output size of the previous layer.
nspins: Number of spin-up and spin-down electrons.
Returns:
array containing the permutation-equivariant features: the input set of
one-electron features, the mean of the one-electron features over each
(occupied) spin channel, and the mean of the two-electron features over each
(occupied) spin channel. Output shape (nelectrons, 3*n1 + 2*n2) if there are
both spin-up and spin-down electrons and (nelectrons, 2*n1 + n2) otherwise.
"""
# Split features into spin up and spin down electrons
spin_partitions = network_blocks.array_partitions(nspins)
h_ones = jnp.split(h_one, spin_partitions, axis=0)
h_twos = jnp.split(h_two, spin_partitions, axis=0)
# Construct inputs to next layer
# h.size == 0 corresponds to unoccupied spin channels.
g_one = [jnp.mean(h, axis=0, keepdims=True) for h in h_ones if h.size > 0]
g_two = [jnp.mean(h, axis=0) for h in h_twos if h.size > 0]
g_one = [jnp.tile(g, [h_one.shape[0], 1]) for g in g_one]
return jnp.concatenate([h_one] + g_one + g_two, axis=1)
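# --- Illustrative sketch (editor addition; hypothetical helper, illustration only). ---
# Shape check for the docstring above: with nspins=(4, 2), n1=8 and n2=4 the
# combined one-electron features have width 3*n1 + 2*n2 = 32.
def _example_symmetric_feature_width():
  h_one = jnp.zeros((6, 8))
  h_two = jnp.zeros((6, 6, 4))
  return construct_symmetric_features(h_one, h_two, nspins=(4, 2)).shape  # (6, 32)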
def fermi_net_orbitals(
params,
pos: jnp.ndarray,
atoms: jnp.ndarray,
nspins: Tuple[int, ...],
options: FermiNetOptions = FermiNetOptions(),
):
"""Forward evaluation of the Fermionic Neural Network up to the orbitals.
Args:
params: A dictionary of parameters, containing fields:
`atoms`: atomic positions, used to construct input features.
`single`: a list of dictionaries with params 'w' and 'b', weights for the
one-electron stream of the network.
`double`: a list of dictionaries with params 'w' and 'b', weights for the
two-electron stream of the network.
`orbital`: a list of two weight matrices, for spin up and spin down (no
bias is necessary as it only adds a constant to each row, which does not
change the determinant).
`dets`: weight on the linear combination of determinants
`envelope`: a dictionary with fields `sigma` and `pi`, weights for the
multiplicative envelope.
pos: The electron positions, a 3N dimensional vector.
atoms: Array with positions of atoms.
nspins: Tuple with number of spin up and spin down electrons.
options: Network configuration.
Returns:
One matrix (two matrices if options.full_det is False) that exchange columns
under the exchange of inputs of shape (ndet, nalpha+nbeta, nalpha+nbeta) (or
(ndet, nalpha, nalpha) and (ndet, nbeta, nbeta)) and a tuple of (ae, r_ae,
r_ee), the atom-electron vectors, distances and electron-electron distances.
"""
| ae, ee, r_ae, r_ee = construct_input_features(pos, atoms) | 3 | 2023-11-13 08:19:53+00:00 | 12k |
civrealm/civrealm | src/civrealm/envs/freeciv_wrapper/tensor_wrapper.py | [
{
"identifier": "TensorAction",
"path": "src/civrealm/envs/freeciv_wrapper/action_wrapper.py",
"snippet": "class TensorAction(Wrapper):\n \"\"\"\n A wrapper that defines tensor action spaces, transforms tensor actions into\n actions that could be handeled by FreecivBaseEnv instance, and adds masks to\n observations.\n\n TensorAction wrapper is composed of five wrappers, including `TruncateDiplCity`,\n `DiplomacyLoop`, `CombineTechResearchGoal`, `PersistentCityProduction`, and `EmbarkWrapper`.\n\n\n\n Parameters\n ----------\n env: TensorBase\n A FreecivBaseEnv instance that has been wrapped by TensorBase.\n\n Attributes\n ----------\n aciton_config: dict\n a dict that configs that specify sizes of mutable entities and action layout.\n mask: dict\n a dict of masks of type numpy ndarray indicating available actions and entities. 0-> unavilalbe, 1->availble.\n available_actions: dict\n cached info['available_actions'], a dict that indicates available actions.\n action_space: gymnasium.spaces.Dict\n a gymnasium.spaces.Dict with keys `['actor_type','city_id','unit_id',\n 'dipl_id','city_action_type','unit_action_type','dipl_action_type',\n 'gov_action_type','tech_action_type']`\n \"\"\"\n\n def __init__(self, env: TensorBase):\n self.action_config = env.get_wrapper_attr(\"config\")\n self.action_config[\"resize\"][\"dipl\"] = self.action_config[\"resize\"][\n \"others_player\"\n ]\n self.actor_type_list = self.action_config[\"actor_type_list\"]\n self.available_actions = {}\n self.mask = {}\n self.__turn = -1\n self.__dealing_with_incoming = False\n\n super().__init__(\n TruncateDiplCity(\n DiplomacyLoop(\n CombineTechResearchGoal(\n PersistentCityProduction(EmbarkWrapper(env))\n )\n )\n )\n )\n\n self.action_space = spaces.Dict(\n {\n \"actor_type\": spaces.Discrete(len(self.actor_type_list)),\n \"city_id\": spaces.Discrete(self.action_config[\"resize\"][\"city\"]),\n \"city_action_type\": spaces.Discrete(\n sum(self.action_config[\"action_layout\"][\"city\"].values())\n ),\n \"unit_id\": spaces.Discrete(self.action_config[\"resize\"][\"unit\"]),\n \"unit_action_type\": spaces.Discrete(\n sum(self.action_config[\"action_layout\"][\"unit\"].values())\n ),\n \"dipl_id\": spaces.Discrete(self.action_config[\"resize\"][\"dipl\"]),\n \"dipl_action_type\": spaces.Discrete(\n sum(self.action_config[\"action_layout\"][\"dipl\"].values())\n ),\n \"gov_action_type\": spaces.Discrete(\n sum(self.action_config[\"action_layout\"][\"gov\"].values())\n ),\n \"tech_action_type\": spaces.Discrete(\n sum(self.action_config[\"action_layout\"][\"tech\"].values())\n ),\n }\n )\n\n def step(self, action):\n # Get {k:value.item()} if value is array\n action = {\n k: (v.item() if isinstance(v, np.ndarray) else v) for k, v in action.items()\n }\n\n base_action = self.action(action)\n if tensor_debug:\n print(base_action)\n obs, reward, terminated, truncated, info = self.env.step(base_action)\n if tensor_debug:\n print(f\"reward:{reward},done:{terminated or truncated}\")\n\n obs = self.update_obs_with_mask(obs, info, action)\n return obs, reward, terminated, truncated, info\n\n def reset(\n self,\n *,\n seed: Optional[int] = None,\n options: Optional[Dict[str, Any]] = None,\n **kwargs,\n ):\n obs, info = self.env.reset(seed=seed, options=options, **kwargs)\n obs = self.update_obs_with_mask(obs, info)\n return obs, info\n\n def action(self, action):\n \"\"\"\n Translate tensor action, a dict of keys `['actor_type','city_id','unit_id',\n 'dipl_id','city_action_type','unit_action_type','dipl_action_type',\n 'gov_action_type','tech_action_type']` to `FreecivBaseEnv` action,\n a tuple `(actor_type, entity_id, 
action_name)`.\n\n \"\"\"\n if tensor_debug:\n self._check_action_layout()\n\n actor_type = action[\"actor_type\"]\n actor_name = self.actor_type_list[actor_type]\n\n if actor_name == \"turn done\":\n return None\n if actor_name in [\"gov\", \"tech\"]:\n entity_pos = None\n entity_id = self.get_wrapper_attr(\"my_player_id\")\n action_index = action[actor_name + \"_action_type\"]\n else:\n entity_pos, action_index = (\n action[actor_name + \"_id\"],\n action[actor_name + \"_action_type\"],\n )\n entity_id = self.get_wrapper_attr(actor_name + \"_ids\")[\n action[actor_name + \"_id\"]\n ]\n\n if tensor_debug:\n assert (\n self.mask[actor_name + \"_action_type_mask\"][entity_pos, action_index]\n == 1\n ), f\"{actor_name} action of id pos {entity_pos}, \\\n action type index {action_index} is masked\"\n\n action_name = sorted(\n list(self.available_actions[actor_name][entity_id].keys())\n )[action_index]\n\n return (actor_name, entity_id, action_name)\n\n def update_obs_with_mask(self, observation, info, action=None):\n \"\"\"\n Update self.mask using observation, info and action from the unwrapped env,\n and add self.mask to the observation of the wrapped env.\n \"\"\"\n if info[\n \"turn\"\n ] != self.__turn or self.__dealing_with_incoming != self.get_wrapper_attr(\n \"dealing_with_incoming\"\n ):\n self.reset_mask()\n self.available_actions = deepcopy(info[\"available_actions\"])\n self.__turn = info[\"turn\"]\n self.__dealing_with_incoming = self.get_wrapper_attr(\"dealing_with_incoming\")\n self._update_mask(observation, info, action)\n\n return update(observation, deepcopy(self.mask))\n\n def reset_mask(self):\n \"\"\"\n Reset self.mask\n\n This is usually called at the start of a new turn to reset masks.\n \"\"\"\n # Reset mask\n sizes = self.action_config[\"resize\"]\n self.mask[\"actor_type_mask\"] = np.ones(\n len(self.actor_type_list), dtype=np.int32\n )\n\n # Units/Cities/Players and others Masks\n for field in [\"unit\", \"city\", \"others_unit\", \"others_city\", \"others_player\"]:\n self.mask[field + \"_mask\"] = np.ones(sizes[field], dtype=np.int32)[\n ..., np.newaxis\n ]\n\n # Units/Cities Id Masks same as their Masks\n self.mask[\"unit_id_mask\"] = self.mask[\"unit_mask\"]\n self.mask[\"city_id_mask\"] = self.mask[\"city_mask\"]\n\n # Dipl id mask\n self.mask[\"dipl_id_mask\"] = np.ones(sizes[\"dipl\"], dtype=np.int32)[\n ..., np.newaxis\n ]\n\n # Action type mask\n for field in [\"city\", \"unit\", \"dipl\"]:\n self.mask[field + \"_action_type_mask\"] = np.ones(\n (\n sizes[field],\n sum(self.action_config[\"action_layout\"][field].values()),\n ),\n dtype=np.int32,\n )\n for field in [\"gov\", \"tech\"]:\n self.mask[field + \"_action_type_mask\"] = np.ones(\n (sum(self.action_config[\"action_layout\"][field].values()),),\n dtype=np.int32,\n )\n\n def _update_mask(self, observation, info, action):\n # update self.mask using action, observation and info\n if action:\n self._mask_from_action(action)\n self._mask_from_obs(observation)\n self._mask_from_info(info)\n\n def _mask_from_action(self, action):\n # Mask out actions that have been performed in this turn.\n actor_type = action[\"actor_type\"]\n actor_name = self.actor_type_list[actor_type]\n if actor_name == \"unit\":\n # self.mask[\"unit_action_type_mask\"][\n # action[\"unit_id\"], action[\"unit_action_type\"]\n # ] = 0\n pass\n elif actor_name == \"city\":\n # self.mask[\"city_action_type_mask\"][action[\"city_id\"], :] = 0\n pass\n elif actor_name == \"gov\":\n self.mask[\"gov_action_type_mask\"][:] &= 0\n elif 
actor_name == \"tech\":\n self.mask[\"tech_action_type_mask\"][:] &= 0\n\n def _mask_from_obs(self, observation):\n # Mask mutable entities using observation\n\n # Mask out trailing spaces for unit and city\n self.mask[\"unit_id_mask\"][len(self.get_wrapper_attr(\"unit_ids\")) : :, :] = 0\n self.mask[\"city_id_mask\"][len(self.get_wrapper_attr(\"city_ids\")) : :, :] = 0\n self.mask[\"dipl_id_mask\"][len(self.get_wrapper_attr(\"dipl_ids\")) : :, :] = 0\n self.mask[\"unit_mask\"] = self.mask[\"unit_id_mask\"].copy()\n self.mask[\"city_mask\"] = self.mask[\"city_id_mask\"].copy()\n\n self.mask[\"unit_action_type_mask\"][\n len(self.get_wrapper_attr(\"unit_ids\")) : :, :\n ] = 0\n self.mask[\"city_action_type_mask\"][\n len(self.get_wrapper_attr(\"city_ids\")) : :, :\n ] = 0\n\n # Mask Unit\n for pos, unit_id in enumerate(\n self.get_wrapper_attr(\"unit_ids\")[: self.action_config[\"resize\"][\"unit\"]]\n ):\n unit = observation[\"unit\"][unit_id]\n if unit[\"moves_left\"] == 0 or self.unwrapped.civ_controller.unit_ctrl.units[\n unit_id\n ][\"activity\"] not in [\n ACTIVITY_IDLE,\n ACTIVITY_FORTIFIED,\n ACTIVITY_SENTRY,\n ACTIVITY_FORTIFYING,\n ]: # agent busy or fortified\n self.mask[\"unit_id_mask\"][pos] &= 0\n self.mask[\"unit_action_type_mask\"][pos, :] &= 0\n\n self.mask[\"others_unit_mask\"][\n len(self.get_wrapper_attr(\"others_unit_ids\")) : :, :\n ] &= 0\n self.mask[\"others_city_mask\"][\n len(self.get_wrapper_attr(\"others_city_ids\")) : :, :\n ] &= 0\n\n if self.get_wrapper_attr(\"researching\"):\n self.mask[\"tech_action_type_mask\"][:] &= 0\n if not self.get_wrapper_attr(\"researching\") and tensor_debug:\n print(f\"techs_researched: {self.get_wrapper_attr('techs_researched')}\")\n\n def _mask_from_info(self, info):\n others_player_num = len(info[\"available_actions\"].get(\"player\", {}).keys())\n self.mask[\"others_player_mask\"][others_player_num::, :] &= 0\n\n # Mask City and Unit\n for mutable in [\"city\", \"unit\", \"dipl\"]:\n entities = info[\"available_actions\"].get(mutable, {})\n if len(entities) == 0:\n self.mask[mutable + \"_action_type_mask\"][:, :] &= 0\n self.mask[mutable + \"_id_mask\"][:] &= 0\n continue\n for i, entity_id in enumerate(\n self.env.get_wrapper_attr(mutable + \"_ids\")[\n : self.action_config[\"resize\"][mutable]\n ]\n ):\n actions = entities.get(entity_id, {})\n if len(actions) == 0:\n self.mask[mutable + \"_action_type_mask\"][i, :] &= 0\n self.mask[mutable + \"_id_mask\"][i] &= 0\n continue\n for action_id, act_name in enumerate(sorted(list(actions.keys()))):\n self.mask[mutable + \"_action_type_mask\"][i, action_id] &= int(\n actions[act_name]\n )\n self.mask[mutable + \"_id_mask\"][i] &= int(\n any(self.mask[mutable + \"_action_type_mask\"][i])\n )\n for mutable in [\"city\", \"unit\", \"dipl\"]:\n actor_type_index = self.actor_type_list.index(mutable)\n self.mask[\"actor_type_mask\"][actor_type_index] &= int(\n any(self.mask[mutable + \"_id_mask\"])\n )\n\n # Mask Gov and Tech\n for immutable in [\"gov\", \"tech\"]:\n options = info[\"available_actions\"].get(immutable, {})\n if len(options) == 0:\n self.mask[immutable + \"_action_type_mask\"][:] &= 0\n continue\n my_player_id = self.get_wrapper_attr(\"my_player_id\")\n for action_id, act_name in enumerate(\n sorted(list(options[my_player_id].keys()))\n ):\n self.mask[immutable + \"_action_type_mask\"][action_id] &= int(\n options[my_player_id][act_name]\n )\n for immutable in [\"gov\", \"tech\"]:\n actor_type_index = self.actor_type_list.index(immutable)\n 
self.mask[\"actor_type_mask\"][actor_type_index] &= int(\n any(self.mask[immutable + \"_action_type_mask\"])\n )\n\n def _check_action_layout(self):\n action_layout = self.action_config[\"action_layout\"]\n for field in [\"city\", \"unit\"]:\n for id, entity in self.available_actions.get(field, {}).items():\n assert len(entity) == sum(action_layout[field].values())\n assert len(\n self.available_actions[\"gov\"][self.get_wrapper_attr(\"my_player_id\")]\n ) == sum(action_layout[\"gov\"].values())"
},
{
"identifier": "Wrapper",
"path": "src/civrealm/envs/freeciv_wrapper/core.py",
"snippet": "class Wrapper(gymnasium.Wrapper):\n def reset(self, *, seed=None, options=None, **kwargs):\n return self.env.reset(seed=seed, options=options, **kwargs)"
},
{
"identifier": "CacheLastObs",
"path": "src/civrealm/envs/freeciv_wrapper/observation_wrapper.py",
"snippet": "class CacheLastObs(Wrapper):\n \"\"\"\n Cache last observation, and override observation with cached observation\n if terminated or truncated.\n\n Attributes\n -------------\n cached_last_obs: dict\n observation cached from the last call of step() or reset()\n \"\"\"\n\n def __init__(self, env):\n self.cached_last_obs = None\n super().__init__(env)\n\n def step(self, action):\n obs, reward, terminated, truncated, info = self.env.step(action)\n\n if terminated or truncated:\n obs = self.cached_last_obs\n info = {} if info is None else info\n return obs, reward, terminated, truncated, info\n\n self.cached_last_obs = deepcopy(obs)\n return obs, reward, terminated, truncated, info"
},
{
"identifier": "TensorObservation",
"path": "src/civrealm/envs/freeciv_wrapper/observation_wrapper.py",
"snippet": "class TensorObservation(Wrapper):\n \"\"\"\n A wrapper that defines tensor observation space, transforms observations got from\n FreecivBaseEnv into tensor observations.\n\n Parameters\n ----------\n env:\n A FreecivBaseEnv wrapped by TensorBase wrapper\n\n Attributes\n ---------\n observation_config: dict\n tensor observation configuration\n observation_space: gymnasium.spaces.Dict\n a gymnasium.spaces.Dict with keys speficified in configuration;\n observation with keywords `mask` would not be removed.\n obs_initialized: bool\n whether observation spaces has been initialized\n obs_layout: dict\n a dict that specify shapes of flattened numpy arrays in observation\n \"\"\"\n\n mutable_fields = [\n \"city\",\n \"unit\",\n \"others_city\",\n \"others_unit\",\n \"others_player\",\n \"dipl\",\n ]\n immutable_fields = [\"map\", \"rules\", \"player\", \"gov\"]\n\n def __init__(self, env: TensorBase):\n self.obs_initialized = False\n self.observation_config = env.get_wrapper_attr(\"config\")\n self.observation_config[\"resize\"][\"dipl\"] = self.observation_config[\"resize\"][\n \"others_player\"\n ]\n self.obs_layout = {}\n self.others_player_ids = []\n super().__init__(env)\n\n def observation(self, observation):\n \"\"\"\n convert observations obtained from `FreecivBaseEnv` into a dict of flattend numpy arrays.\n \"\"\"\n # in case of gameover, return None as observation\n if len(observation.get(\"player\", {})) == 0:\n return None\n\n observation = deepcopy(observation)\n observation = self._merge_player_techs(observation)\n obs_dict = self._handle_dict(observation)\n obs = self._embed_immutable(deepcopy(obs_dict))\n obs = self._embed_mutable(obs)\n\n if not self.obs_initialized:\n self.observation_space = self._infer_obs_space(obs)\n self.obs_initialized = True\n if tensor_debug:\n self._check_obs_layout(obs)\n return obs\n\n def _handle_dict(self, obs):\n obs[\"city\"] = obs.get(\"city\", {})\n obs[\"unit\"] = obs.get(\"unit\", {})\n\n # TODO: This should be the base env's reponsibility\n # Add info to city and unit from civcontroller\n update(obs[\"city\"], self.unwrapped.civ_controller.city_ctrl.cities)\n update(obs[\"unit\"], self.unwrapped.civ_controller.unit_ctrl.units)\n # update player info with dipl_state\n update(obs[\"player\"], obs.get(\"dipl\", {}))\n\n my_player_id = self.get_wrapper_attr(\"my_player_id\")\n\n obs[\"dipl\"] = {\n player: state[\"diplomacy_clause_map\"]\n for player, state in obs.get(\"dipl\", {}).items()\n if player != my_player_id\n }\n for player, treaty in obs[\"dipl\"].items():\n obs[\"dipl\"][player] = self._encode_treaty(treaty, player)\n\n # remove unused fields and keep mask if given\n obs = {\n k: v\n for k, v in obs.items()\n if k in self.observation_config[\"filter_observation\"] or k.endswith(\"mask\")\n }\n\n # Add others fields and initialize\n\n obs[\"others_unit\"] = {}\n obs[\"others_city\"] = {}\n\n for field in [\"unit\", \"city\"]:\n for key, val in list(obs[field].items()):\n if val[\"owner\"] != my_player_id:\n # delete others' entity from unit and city\n obs[\"others_\" + field][key] = obs[field].pop(key)\n\n obs[\"others_player\"] = {\n key: obs[\"player\"].pop(key)\n for key in list(obs[\"player\"].keys())\n if key != my_player_id\n }\n obs[\"player\"] = obs[\"player\"][my_player_id]\n\n # Initialize build_cost with 0 for now\n obs[\"rules\"][\"build_cost\"] = 0\n\n mutable_fields = [field for field in obs.keys() if field in self.mutable_fields]\n immutable_fields = [\n field for field in obs.keys() if field in 
self.immutable_fields\n ]\n\n ops = self.observation_config[\"obs_ops\"]\n\n # Handle immutable\n # delete unused keywords and transform useful keywords\n def apply_ops(field):\n for k, val in list(obs[field].items()):\n if k in list(ops[field].keys()):\n obs[field][k] = ops[field][k](val)\n else:\n obs[field].pop(k)\n\n for field in immutable_fields:\n apply_ops(field)\n\n # Handle mutable\n # delete unused keywords and transform useful keywords\n def apply_ops_mutable(field):\n for entity_id, entity in list(obs[field].items()):\n for k, val in list(entity.items()):\n if k in list(ops[field].keys()):\n entity[k] = ops[field][k](val)\n else:\n entity.pop(k)\n\n for field in mutable_fields:\n apply_ops_mutable(field)\n\n self.others_player_ids = sorted(obs[\"others_player\"].keys())\n\n return obs\n\n def _embed_immutable(self, obs):\n immutable = {\n field: obs[field] for field in obs if field in self.immutable_fields\n }\n\n if not self.obs_initialized:\n for field, field_dict in immutable.items():\n self.obs_layout[field] = OrderedDict(\n [(k, field_dict[k].shape) for k in sorted(list(field_dict.keys()))]\n )\n\n for field, field_dict in immutable.items():\n # check field layout is correct\n if tensor_debug:\n assert self.obs_layout[field] == {\n k: v.shape for k, v in field_dict.items()\n }\n\n obs[field] = np.concatenate(\n [field_dict[k] for k in sorted(list(field_dict.keys()))], axis=-1\n ).astype(np.int32)\n return obs\n\n def _embed_mutable(self, obs):\n mutable = {field: obs[field] for field in obs if field in self.mutable_fields}\n mutable_layout = self.observation_config[\"obs_mutable_layout\"]\n\n if not self.obs_initialized:\n for field, entity_dict in mutable.items():\n layout = mutable_layout[field]\n self.obs_layout[field] = OrderedDict(\n [(key, layout[key]) for key in sorted(layout)]\n )\n\n for field, entity_dict in mutable.items():\n # for empty field, fill with zero\n if len(entity_dict) == 0:\n mutable[field] = np.zeros(\n [\n self.observation_config[\"resize\"][field],\n *reduce(add_shape, self.obs_layout[field].values()),\n ],\n dtype=np.int32,\n )\n continue\n if tensor_debug:\n # check entity layout is correct\n assert all(\n self.obs_layout[field] == {k: v.shape for k, v in entity.items()}\n for entity in entity_dict.values()\n )\n # combine every entity's properties into an array along the last axis\n entity_dict = {\n id: np.concatenate([entity[k] for k in sorted(entity.keys())], axis=-1)\n for id, entity in entity_dict.items()\n }\n # combine all entities in a field into an array along the first axis\n mutable[field] = np.stack(\n [entity_dict[id] for id in self.get_wrapper_attr(field + \"_ids\")],\n axis=0,\n ).astype(np.int32)\n\n # resize to maximum entity shape\n for field in mutable:\n size = self.observation_config[\"resize\"][field]\n mutable[field] = resize_data(mutable[field], size).astype(np.int32)\n\n update(obs, mutable)\n return obs\n\n def _infer_obs_space(self, observation) -> spaces.Dict:\n return spaces.Dict(\n [\n (key, spaces.Box(low=0, high=1000, shape=space.shape, dtype=np.int32))\n for key, space in observation.items()\n ]\n )\n\n def _check_obs_layout(self, obs):\n for field, val in self.obs_layout.items():\n shape = reduce(add_shape, val.values())\n assert shape[-1] == obs[field].shape[-1]\n\n def _merge_player_techs(self, obs):\n for player in obs[\"player\"].values():\n player[\"techs\"] = []\n for tech in sorted(obs[\"tech\"]):\n player_tech = player.pop(f\"tech_{tech}\")\n player[\"techs\"].append(player_tech if player_tech is not 
None else 255)\n return obs\n\n def _encode_treaty(self, treaty, player):\n encoded = {\n \"type\": np.zeros(10 * 2, dtype=np.int32),\n \"give_city\": np.zeros(\n self.observation_config[\"resize\"][\"city\"], dtype=np.int32\n ),\n \"ask_city\": np.zeros(\n self.observation_config[\"resize\"][\"others_city\"], dtype=np.int32\n ),\n \"give_gold\": 255,\n \"ask_gold\": 255,\n }\n\n for clause in treaty:\n value = clause[\"value\"]\n\n if clause[\"type\"] == player_const.CLAUSE_GOLD:\n gold = sum(int(value >= level) for level in GOLD_SET)\n if clause[\"giver\"] == player:\n encoded[\"ask_gold\"] = gold\n else:\n encoded[\"give_gold\"] = gold\n elif clause[\"type\"] == player_const.CLAUSE_CITY:\n if clause[\"giver\"] == player:\n city_list = self.get_wrapper_attr(\"others_city_ids\")\n field = \"ask_city\"\n else:\n city_list = self.get_wrapper_attr(\"city_ids\")\n field = \"give_city\"\n if value in city_list:\n city_idx = city_list.index(value)\n encoded[field][city_idx] = 1\n\n if clause[\"giver\"] == player:\n encoded[\"type\"][clause[\"type\"]] = 1\n else:\n encoded[\"type\"][clause[\"type\"] + 10] = 1\n\n return encoded"
},
{
"identifier": "TensorBase",
"path": "src/civrealm/envs/freeciv_wrapper/tensor_base_wrapper.py",
"snippet": "class TensorBase(Wrapper):\n \"\"\"\n A basic wrapper that deals with config loading and entity id recording, \n required by all tensor-related wrappers.\n\n\n Parameters\n ----------\n env: FreecivBaseEnv\n config: dict\n tensor env configuration\n\n Attributes\n ---------\n config: dict\n A dict that specifies all configurations related to tensor wrapper.\n my_player_id: int\n My player id.\n unit_ids: list\n A sorted list of my unit ids.\n city_ids: list\n A sorted list of my city ids.\n others_unit_ids: list\n A sorted list of others unit ids.\n others_city_ids: list\n A sorted list of others city ids.\n dipl_ids : list\n A list of others player ids.\n units : dict\n ruleset information about units.\n unit_types :list\n A list of all unit types.\n unit_costs : list\n A list of int indicating unit costs.\n improvements : dict\n Ruleset information about city improvements.\n impr_costs :list\n A list of int indicating city improvements costs.\n\n \"\"\"\n\n def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):\n self.config = config\n self.my_player_id = -1\n\n # mutable ids\n self.unit_ids = []\n self.city_ids = []\n self.others_unit_ids = []\n self.others_city_ids = []\n self.dipl_ids = []\n\n # ruleset\n self.units = {}\n self.unit_types = []\n self.unit_costs = []\n self.improvements = {}\n self.impr_costs = []\n\n super().__init__(env)\n\n def update_sequence_ids(self, observation):\n \"\"\"\n Use city, unit and dipl information in observation to update ids.\n \"\"\"\n self.unit_ids = sorted(\n list(\n k\n for k in observation.get(\"unit\", {}).keys()\n if observation[\"unit\"][k][\"owner\"] == self.my_player_id\n )\n )\n self.others_unit_ids = sorted(\n list(\n k\n for k in observation.get(\"unit\", {}).keys()\n if observation[\"unit\"][k][\"owner\"] != self.my_player_id\n )\n )\n self.city_ids = sorted(\n list(\n k\n for k in observation.get(\"city\", {}).keys()\n if observation[\"city\"][k][\"owner\"] == self.my_player_id\n )\n )\n self.others_city_ids = sorted(\n list(\n k\n for k in observation.get(\"city\", {}).keys()\n if observation[\"city\"][k][\"owner\"] != self.my_player_id\n )\n )\n self.dipl_ids = [\n player\n for player in sorted(observation.get(\"dipl\", {}).keys())\n if player != self.my_player_id\n ]\n\n def update_config(self):\n \"\"\"\n Update config using ruleset information at the start of the turn.\n \"\"\"\n self.units = self.unwrapped.civ_controller.rule_ctrl.unit_types\n self.unit_types = [self.units[i][\"name\"] for i in range(len(self.units))]\n self.unit_costs = [self.units[i][\"build_cost\"] for i in range(len(self.units))]\n self.improvements = self.unwrapped.civ_controller.rule_ctrl.improvements\n self.impr_costs = [\n self.improvements[i][\"build_cost\"] for i in range(len(self.improvements))\n ]\n self.config[\"obs_ops\"][\"unit\"][\"type_rule_name\"] = onehotifier_maker(\n self.unit_types\n )\n self.config[\"obs_ops\"][\"rules\"][\"build_cost\"] = lambda _: np.array(\n self.unit_costs + self.impr_costs\n )\n\n def reset(self, *args, **kwargs):\n obs, info = self.env.reset(*args, **kwargs)\n self.my_player_id = self.unwrapped.civ_controller.player_ctrl.my_player_id\n\n self.update_config()\n self.update_sequence_ids(obs)\n return obs, info\n\n def step(self, *args, **kwargs):\n obs, reward, terminated, truncated, info = self.env.step(*args, **kwargs)\n self.update_sequence_ids(obs)\n return obs, reward, terminated, truncated, info"
}
] | import numpy as np
from civrealm.envs import FreecivBaseEnv
from civrealm.envs.freeciv_wrapper.config import default_tensor_config
from .action_wrapper import TensorAction
from .core import Wrapper
from .observation_wrapper import CacheLastObs, TensorObservation
from .tensor_base_wrapper import TensorBase | 7,333 |
class TensorWrapper(Wrapper):
"""
    TensorWrapper is used to make the Civrealm environment tensorized by converting
observations from FreecivBaseEnv into tensors and tensor actions back to actions compatible with
FreecivBaseEnv.
    TensorWrapper is composed of `TensorBase`, `TensorAction`, `TensorObservation`
and `CacheLastObs`.
Parameters
----------
env
config:
tensor env configuration
Attributes
----------
config: dict
tensor wrapper configuration
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
super().__init__(
CacheLastObs(
|
class TensorWrapper(Wrapper):
"""
    TensorWrapper is used to make the Civrealm environment tensorized by converting
observations from FreecivBaseEnv into tensors and tensor actions back to actions compatible with
FreecivBaseEnv.
    TensorWrapper is composed of `TensorBase`, `TensorAction`, `TensorObservation`
and `CacheLastObs`.
Parameters
----------
env
config:
tensor env configuration
Attributes
----------
config: dict
tensor wrapper configuration
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
super().__init__(
CacheLastObs( | TensorObservation(TensorAction(TensorBase(env, config=config))) | 4 | 2023-11-18 19:35:50+00:00 | 12k |
RAIVNLab/MatFormer-OLMo | olmo/model.py | [
{
"identifier": "PathOrStr",
"path": "olmo/aliases.py",
"snippet": ""
},
{
"identifier": "BeamSearch",
"path": "olmo/beam_search.py",
"snippet": "class BeamSearch:\n \"\"\"\n Implements the beam search algorithm for decoding the most likely sequences.\n\n :param end_index: The index of the \"stop\" or \"end\" token in the vocabulary. Usually the EOS token ID.\n\n :param max_steps: The maximum number of decoding steps to take, i.e. the maximum length\n of the predicted sequences.\n\n :param beam_size: The width of the beam used.\n\n :param per_node_beam_size: The maximum number of candidates to consider per node, at each step in the search.\n If not given, this just defaults to `beam_size`. Setting this parameter\n to a number smaller than `beam_size` may give better results, as it can introduce\n more diversity into the search. See\n [*Beam Search Strategies for Neural Machine Translation*, Freitag and Al-Onaizan, 2017]\n (https://api.semanticscholar.org/CorpusID:2229477).\n\n :param sampler: An optional `Sampler` which is used to pick next candidate nodes and beams.\n If not specified, `DeterministicSampler` will be used, which just takes the\n `per_node_beam_size` most likely nodes and the `beam_size` most likely beams.\n\n Using the [`GumbelSampler`](#gumbelsampler), on the other hand, will give you\n [Stochastic Beam Search](https://api.semanticscholar.org/CorpusID:76662039).\n\n :param min_steps: The minimum number of decoding steps to take, i.e. the minimum length of\n the predicted sequences. This does not include the start or end tokens. If `None`,\n no minimum is enforced.\n\n :param final_sequence_scorer: An optional `FinalSequenceScorer` which is used to score the final generated sequences.\n The output from this module is what is returned by the `search` method. If not\n specified, `SequenceLogProbabilityScorer` will be used, which scores the sequences\n by the sum of the token log probabilities.\n\n :param constraints: An optional list of `Constraint`s which should be applied during beam search. 
If not\n provided, no constraints will be enforced.\n\n \"\"\"\n\n def __init__(\n self,\n end_index: int,\n *,\n max_steps: int = 50,\n beam_size: int = 10,\n per_node_beam_size: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n min_steps: Optional[int] = None,\n final_sequence_scorer: Optional[FinalSequenceScorer] = None,\n constraints: Optional[List[Constraint]] = None,\n ) -> None:\n if not max_steps > 0:\n raise ValueError(\"max_steps must be positive\")\n if not beam_size > 0:\n raise ValueError(\"beam_size must be positive\")\n if per_node_beam_size is not None and not per_node_beam_size > 0:\n raise ValueError(\"per_node_beam_size must be positive\")\n if min_steps is not None:\n if not min_steps >= 0:\n raise ValueError(\"min_steps must be non-negative\")\n if not min_steps <= max_steps:\n raise ValueError(\"min_steps must be less than or equal to max_steps\")\n\n self._end_index = end_index\n self.max_steps = max_steps\n self.beam_size = beam_size\n self.per_node_beam_size = per_node_beam_size or beam_size\n self.sampler = sampler or DeterministicSampler()\n self.min_steps = min_steps or 0\n self.final_sequence_scorer = final_sequence_scorer or SequenceLogProbabilityScorer()\n self.constraints = constraints or []\n\n @staticmethod\n def _reconstruct_sequences(predictions, backpointers):\n # Reconstruct the sequences.\n # shape: [(batch_size, beam_size, 1)]\n reconstructed_predictions = [predictions[-1].unsqueeze(2)]\n\n if not backpointers:\n return reconstructed_predictions\n\n # shape: (batch_size, beam_size)\n cur_backpointers = backpointers[-1]\n\n for timestep in range(len(predictions) - 2, 0, -1):\n # shape: (batch_size, beam_size, 1)\n cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2)\n\n reconstructed_predictions.append(cur_preds)\n\n # shape: (batch_size, beam_size)\n cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers)\n\n # shape: (batch_size, beam_size, 1)\n final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2)\n\n reconstructed_predictions.append(final_preds)\n\n return reconstructed_predictions\n\n def search(\n self,\n start_predictions: torch.Tensor,\n start_state: StateType,\n step: StepFunctionType,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Given a starting state and a step function, apply beam search to find the\n most likely target sequences.\n\n Returns a tuple of `(predictions, final_scores)`, where `predictions`\n has shape `(batch_size, beam_size, max_steps)` and `final_scores`\n has shape `(batch_size, beam_size)`.\n\n .. note::\n If your step function returns `-inf` for some log probabilities\n (like if you're using a masked log-softmax) then some of the \"best\"\n sequences returned may also have `-inf` log probability. Specifically\n this happens when the beam size is smaller than the number of actions\n with finite log probability (non-zero probability) returned by the step function.\n Therefore if you're using a mask you may want to check the results from `search`\n and potentially discard sequences with non-finite log probability.\n\n :param start_predictions: A tensor containing the initial predictions with shape `(batch_size,)`.\n Usually the initial predictions are just the index of the \"start\" token\n in the target vocabulary.\n\n :param start_state: The initial state passed to the `step` function. 
Each value of the state dict\n should be a tensor of shape `(batch_size, *)`, where `*` means any other\n number of dimensions.\n\n :param step: A function that is responsible for computing the next most likely tokens,\n given the current state and the predictions from the last time step.\n The function should accept two or three arguments:\n\n - a tensor of shape `(group_size,)` or representing the index of the predicted\n tokens from the last time step,\n - the current state, a `StateType`, and\n - optionally, the timestep, an `int`.\n\n The `group_size` will be `batch_size * beam_size`, except in the initial\n step, for which it will just be `batch_size`.\n\n The function is expected to return a tuple, where the first element\n is a tensor of shape `(group_size, vocab_size)` containing\n the log probabilities of the tokens for the next step, and the second\n element is the updated state. The tensor in the state should have shape\n `(group_size, *)`, where `*` means any other number of dimensions.\n\n \"\"\"\n step_signature = signature(step)\n if len(step_signature.parameters) < 3:\n # If the step function we're given does not take the time step argument, wrap it\n # in one that does.\n old_step = cast(StepFunctionTypeNoTimestep, step)\n\n def new_step(last_predictions: torch.Tensor, state: Dict[str, torch.Tensor], time_step: int):\n del time_step\n return old_step(last_predictions, state)\n\n return self._search(start_predictions, start_state, new_step)\n else:\n return self._search(start_predictions, start_state, cast(StepFunctionTypeWithTimestep, step))\n\n def _search(\n self,\n start_predictions: torch.Tensor,\n start_state: StateType,\n step: StepFunctionTypeWithTimestep,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n batch_size = start_predictions.size()[0]\n\n # List of (batch_size, beam_size) tensors. One for each time step. Does not\n # include the start symbols, which are implicit.\n predictions: List[torch.Tensor] = []\n\n # List of (batch_size, beam_size) tensors. One for each time step. None for\n # the first. Stores the index n for the parent prediction, i.e.\n # predictions[t-1][i][n], that it came from.\n backpointers: List[torch.Tensor] = []\n\n constraint_states = [constraint.init_state(batch_size) for constraint in self.constraints]\n\n # Calculate the first timestep. This is done outside the main loop\n # because we are going from a single decoder input (the output from the\n # encoder) to the top `beam_size` decoder outputs. 
On the other hand,\n # within the main loop we are going from the `beam_size` elements of the\n # beam to `beam_size`^2 candidates from which we will select the top\n # `beam_size` elements for the next iteration.\n # shape: (batch_size, num_classes)\n start_class_log_probabilities, state = step(start_predictions, start_state, 0)\n\n num_classes = start_class_log_probabilities.size()[1]\n\n # Make sure `per_node_beam_size` is not larger than `num_classes`.\n if self.per_node_beam_size > num_classes:\n raise ValueError(\n f\"Vocab size ({num_classes:d}) too small \"\n f\"relative to per_node_beam_size ({self.per_node_beam_size:d}).\\n\"\n f\"Please decrease beam_size or per_node_beam_size.\"\n )\n\n sampler_state = self.sampler.init_state(start_class_log_probabilities, batch_size, num_classes)\n\n # Apply all constraints.\n if self.constraints:\n # shape: (batch_size, 1, num_classes)\n expanded_start_class_log_probabilities = start_class_log_probabilities.unsqueeze(1)\n for constraint, constraint_state in zip(self.constraints, constraint_states):\n expanded_start_class_log_probabilities = constraint.apply(\n constraint_state, expanded_start_class_log_probabilities\n )\n start_class_log_probabilities = expanded_start_class_log_probabilities.squeeze(1)\n\n # Prevent selecting the end symbol if there is any min_steps constraint\n if self.min_steps >= 1:\n start_class_log_probabilities[:, self._end_index] = torch.finfo(\n start_class_log_probabilities.dtype\n ).min\n\n # Get the initial predicted classed and their log probabilities.\n # shape: (batch_size, beam_size), (batch_size, beam_size)\n (\n start_top_log_probabilities,\n start_predicted_classes,\n sampler_state,\n ) = self.sampler.sample_beams(start_class_log_probabilities, self.beam_size, sampler_state)\n\n if self.beam_size == 1 and (start_predicted_classes == self._end_index).all():\n warnings.warn(\n \"Empty sequences predicted. You may want to increase the beam size or ensure \"\n \"your step function is working properly.\",\n RuntimeWarning,\n )\n return start_predicted_classes.unsqueeze(-1), start_top_log_probabilities\n\n # The log probabilities for the last time step.\n # shape: (batch_size, beam_size)\n last_log_probabilities = start_top_log_probabilities\n\n # shape: [(batch_size, beam_size)]\n predictions.append(start_predicted_classes)\n\n # Log probability tensor that mandates that the end token is selected.\n # shape: (batch_size * beam_size, num_classes)\n log_probs_after_end = start_class_log_probabilities.new_full(\n (batch_size * self.beam_size, num_classes),\n torch.finfo(start_class_log_probabilities.dtype).min,\n )\n log_probs_after_end[:, self._end_index] = 0.0\n\n # Set the same state for each element in the beam.\n self._update_initial_state(state, batch_size)\n\n for i, constraint in enumerate(self.constraints):\n constraint_states[i] = constraint.update_state(constraint_states[i], start_predicted_classes)\n\n for timestep in range(self.max_steps - 1):\n # shape: (batch_size * beam_size,)\n last_predictions = predictions[-1].reshape(batch_size * self.beam_size)\n\n # If every predicted token from the last step is `self._end_index`,\n # then we can stop early.\n if (last_predictions == self._end_index).all():\n break\n # Take a step. 
This get the predicted log probs of the next classes\n # and updates the state.\n # shape: (batch_size * beam_size, num_classes)\n class_log_probabilities, state = step(last_predictions, state, timestep + 1)\n\n # Apply all constraints.\n if self.constraints:\n # shape: (batch_size, beam_size, num_classes)\n reshaped_class_log_probabilities = class_log_probabilities.view(batch_size, self.beam_size, -1)\n for constraint, constraint_state in zip(self.constraints, constraint_states):\n reshaped_class_log_probabilities = constraint.apply(\n constraint_state, reshaped_class_log_probabilities\n )\n # shape: (batch_size * beam_size, num_classes)\n class_log_probabilities = reshaped_class_log_probabilities.view(batch_size * self.beam_size, -1)\n\n # The `timestep`-th iteration of the for loop is generating the `timestep + 2`-th token\n # of the sequence (because `timestep` is 0-indexed and we generated the first token\n # before the for loop). Here we block the end index if the search is not allowed to\n # terminate on this iteration.\n if timestep + 2 <= self.min_steps:\n class_log_probabilities[:, self._end_index] = torch.finfo(class_log_probabilities.dtype).min\n\n # shape: (batch_size * beam_size, num_classes)\n last_predictions_expanded = last_predictions.unsqueeze(-1).expand(\n batch_size * self.beam_size, num_classes\n )\n\n # Here we are finding any beams where we predicted the end token in\n # the previous timestep and replacing the distribution with a\n # one-hot distribution, forcing the beam to predict the end token\n # this timestep as well.\n # shape: (batch_size * beam_size, num_classes)\n cleaned_log_probabilities = torch.where(\n last_predictions_expanded == self._end_index,\n log_probs_after_end,\n class_log_probabilities,\n )\n\n # shape (both): (batch_size * beam_size, per_node_beam_size)\n top_log_probabilities, predicted_classes, sampler_state = self.sampler.sample_nodes(\n cleaned_log_probabilities, self.per_node_beam_size, sampler_state\n )\n\n # Here we expand the last log probabilities to (batch_size * beam_size, per_node_beam_size)\n # so that we can add them to the current log probs for this timestep.\n # This lets us maintain the log probability of each element on the beam.\n # shape: (batch_size * beam_size, per_node_beam_size)\n expanded_last_log_probabilities = (\n last_log_probabilities.unsqueeze(2)\n .expand(batch_size, self.beam_size, self.per_node_beam_size)\n .reshape(batch_size * self.beam_size, self.per_node_beam_size)\n )\n\n # shape: (batch_size * beam_size, per_node_beam_size)\n summed_top_log_probabilities = top_log_probabilities + expanded_last_log_probabilities\n\n # shape: (batch_size, beam_size * per_node_beam_size)\n reshaped_summed = summed_top_log_probabilities.reshape(\n batch_size, self.beam_size * self.per_node_beam_size\n )\n\n # shape: (batch_size, beam_size * per_node_beam_size)\n reshaped_predicted_classes = predicted_classes.reshape(\n batch_size, self.beam_size * self.per_node_beam_size\n )\n\n # Keep only the top `beam_size` beam indices.\n # shape (both): (batch_size, beam_size)\n (\n restricted_beam_log_probs,\n restricted_beam_indices,\n sampler_state,\n ) = self.sampler.sample_beams(reshaped_summed, self.beam_size, sampler_state)\n\n # Use the beam indices to extract the corresponding classes.\n # shape: (batch_size, beam_size)\n restricted_predicted_classes = reshaped_predicted_classes.gather(1, restricted_beam_indices)\n\n predictions.append(restricted_predicted_classes)\n\n # shape: (batch_size, beam_size)\n last_log_probabilities 
= restricted_beam_log_probs\n\n # The beam indices come from a `beam_size * per_node_beam_size` dimension where the\n # indices with a common ancestor are grouped together. Hence\n # dividing by per_node_beam_size gives the ancestor. (Note that this is integer\n # division as the tensor is a LongTensor.)\n # shape: (batch_size, beam_size)\n backpointer = torch.divide(restricted_beam_indices, self.per_node_beam_size, rounding_mode=\"trunc\")\n backpointers.append(backpointer)\n\n # Keep only the pieces of the state tensors corresponding to the\n # ancestors created this iteration.\n self._update_state(state, backpointer)\n\n for i, constraint in enumerate(self.constraints):\n constraint_states[i] = constraint.update_state(\n constraint_states[i], restricted_predicted_classes, last_backpointer=backpointer\n )\n\n # Warn about \"-inf\" log probabilities if not using any constraints (negligible\n # log probabilities are expected when using constraints).\n if not self.constraints and (\n not torch.isfinite(last_log_probabilities).all()\n or (last_log_probabilities == torch.finfo(last_log_probabilities.dtype).min).any()\n ):\n warnings.warn(\n \"Negligible log probabilities encountered ('-inf' or equivalent). \"\n \"Some final sequences may not make sense. \"\n \"This can happen when the beam size is larger than the number of valid (non-zero \"\n \"probability) transitions that the step function produces.\",\n RuntimeWarning,\n )\n\n reconstructed_predictions = self._reconstruct_sequences(predictions, backpointers)\n\n # shape: (batch_size, beam_size, max_steps)\n all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2)\n\n # Calculate the final sequence scores\n # shape: (batch_size, beam_size)\n final_scores = self.final_sequence_scorer.score(all_predictions, last_log_probabilities, self._end_index)\n\n # Sort the sequences based on the final scores so the best scoring\n # sequence is at index 0\n sorted_final_scores, sorted_indices = torch.sort(final_scores, dim=1, descending=True)\n sorted_all_predictions = torch.gather(\n all_predictions, 1, sorted_indices.unsqueeze(-1).expand_as(all_predictions)\n )\n\n return sorted_all_predictions, sorted_final_scores\n\n def _update_initial_state(self, state: StateType, batch_size: int):\n \"\"\"\n Expand tensors in a state dictionary from `(batch_size, *)` to `(batch_size * beam_size, *)`.\n \"\"\"\n for key, state_tensor in state.items():\n if state_tensor is None:\n continue\n # shape: (batch_size * beam_size, *)\n _, *last_dims = state_tensor.size()\n state[key] = (\n state_tensor.unsqueeze(1)\n .expand(batch_size, self.beam_size, *last_dims)\n .reshape(batch_size * self.beam_size, *last_dims)\n )\n\n def _update_state(self, state: StateType, backpointer: torch.Tensor):\n batch_size = backpointer.size()[0]\n\n for key, state_tensor in state.items():\n if state_tensor is None:\n continue\n _, *last_dims = state_tensor.size()\n # shape: (batch_size, beam_size, *)\n expanded_backpointer = backpointer.view(batch_size, self.beam_size, *([1] * len(last_dims))).expand(\n batch_size, self.beam_size, *last_dims\n )\n # shape: (batch_size * beam_size, *)\n state[key] = (\n state_tensor.reshape(batch_size, self.beam_size, *last_dims)\n .gather(1, expanded_backpointer)\n .reshape(batch_size * self.beam_size, *last_dims)\n )"
},
{
"identifier": "Constraint",
"path": "olmo/beam_search.py",
"snippet": "class Constraint:\n \"\"\"\n An abstract class that can be used to enforce constraints on the output predictions\n by manipulating the class log probabilities during beam search.\n\n A `Constraint` just has three methods that need to be implemented by subclasses:\n `init_state()`, `apply()` and `_update_state()`.\n\n `init_state()` takes one argument:\n\n - the batch size, an int\n\n It returns a constraint state, which is a nested list of dictionaries, with any state needed for subsequent\n calls to `apply()` and `update_state()`. The length of the outer list should be equal to `batch_size`.\n Each inner list should be of length 1.\n\n `apply()` takes two arguments:\n\n - the constraint state, which is a nested list of dictionaries. The length of the outer list is `batch_size`\n and the length of each inner list is `beam_size` except on the first time `apply()` is called when it is 1.\n - `class_log_probabilities`, a tensor of shape `(batch_size, beam_size, num_classes)` that contains the\n log probabilities for the classes during search. The first time `apply()` is called, `beam_size = 1`.\n\n The `apply()` method should return new `class_log_probabilities` that enforce the constraint\n for this step of beam search. For instance, it may prevent a specific class from being selected by setting\n the corresponding log probability to a negligible value such as `float(\"-inf\")` or\n `torch.finfo(class_log_probabilities.dtype).min`.\n\n `_update_state()` takes two arguments:\n\n - the copied parent constraint state, which is a nested list of dictionaries. `state[i][j]` contains the\n copied state for the parent of `last_prediction[i, j]`. It is unique to that batch and beam, so it can be\n directly edited in-place without affecting the others.\n - last_prediction, a tensor of shape `(batch_size, beam_size)` containing the predictions from the last\n step of beam search.\n\n The `_update_state()` function should return a new constraint state, a nested list of dictionaries of\n length `batch_size` and inner list of length `beam_size`, one for each of the predictions in `last_prediction`.\n\n \"\"\"\n\n @abstractmethod\n def init_state(\n self,\n batch_size: int,\n ) -> ConstraintStateType:\n raise NotImplementedError\n\n @abstractmethod\n def apply(\n self,\n state: ConstraintStateType,\n class_log_probabilities: torch.Tensor,\n ) -> torch.Tensor:\n raise NotImplementedError\n\n @staticmethod\n def _copy_state(\n state: ConstraintStateType,\n batch_size: int,\n beam_size: int,\n last_backpointer: Optional[torch.Tensor] = None,\n ) -> ConstraintStateType:\n \"\"\"\n Copies the `state` . This method copies the data in `state` using `copy.deepcopy()`. 
If this\n is not appropriate for your constraint, you will need to implement the copying yourself.\n \"\"\"\n new_state = []\n for i in range(batch_size):\n batch_state = []\n for j in range(beam_size):\n if last_backpointer is None:\n # This is the first prediction, so the backpointer is 0\n backpointer = 0\n else:\n backpointer = last_backpointer[i, j].item()\n batch_state.append(copy.deepcopy(state[i][backpointer])) # type: ignore\n new_state.append(batch_state)\n return new_state\n\n def update_state(\n self,\n state: ConstraintStateType,\n last_prediction: torch.Tensor,\n last_backpointer: Optional[torch.Tensor] = None,\n ) -> ConstraintStateType:\n batch_size, beam_size = last_prediction.size()\n new_state = self._copy_state(state, batch_size, beam_size, last_backpointer)\n return self._update_state(new_state, last_prediction)\n\n @abstractmethod\n def _update_state(\n self,\n state: ConstraintStateType,\n last_prediction: torch.Tensor,\n ) -> ConstraintStateType:\n raise NotImplementedError"
},
{
"identifier": "FinalSequenceScorer",
"path": "olmo/beam_search.py",
"snippet": "class FinalSequenceScorer:\n \"\"\"\n An abstract class that can be used to score the final generated sequences found\n by beam search. Given the predicted sequences and the corresponding log probabilities of\n those sequences, the class calculates and returns the final score of the sequences.\n\n The default implementation scores the sequences using the sum of the log probabilities of\n the sequence, which is passed as input.\n \"\"\"\n\n @abstractmethod\n def score(self, predictions: torch.Tensor, log_probabilities: torch.Tensor, end_index: int) -> torch.Tensor:\n \"\"\"\n Score the final predictions found by beam search.\n Returns a tensor of the final sequence scores of shape `(batch_size, beam_size)`.\n\n :param predictions: A tensor containing the initial predictions with shape `(batch_size, beam_size, max_steps)`.\n :param log_probabilities: A tensor containing the log probabilities of the sequence, defined as the sum\n of the log probabilities per token, with shape `(batch_size, beam_size)`.\n :param end_index: The index of the end symbol.\n\n \"\"\"\n raise NotImplementedError"
},
{
"identifier": "Sampler",
"path": "olmo/beam_search.py",
"snippet": "class Sampler:\n \"\"\"\n An abstract class that can be used to sample candidates (either nodes or beams)\n within `BeamSearch`.\n\n A `Sampler` just has three methods, `init_state()`, `sample_nodes()` and `sample_beams()`.\n\n `init_state()` takes three arguments:\n\n - a tensor of starting log probs with shape `(batch_size,, num_classes)`,\n - the batch size, an int,\n - and the number of classes, also an int.\n\n It returns a state dictionary with any state tensors needed for subsequent\n calls to `sample_nodes()` and `sample_beams()`.\n\n By default this method just returns an empty dictionary.\n\n Both `sample_nodes()` and `sample_beams()` should take three arguments:\n\n - tensor of normalized log probabilities with shape `(batch_size, num_examples)`,\n - an integer representing the number of samples to take for each example in the batch,\n - and a state dictionary which could contain any tensors needed for the `Sampler` to keep\n track of state.\n\n For `sample_nodes()`, `num_examples = num_classes`, but for `sample_beams`,\n `num_examples = beam_size * per_node_beam_size`.\n\n The return value should be a tuple containing:\n\n - a tensor of log probabilities of the sampled examples with shape `(batch_size, num_samples)`,\n - a tensor of indices of the sampled examples with shape `(batch_size, num_samples)`,\n - and the updated state dictionary.\n\n A default implementation of `sample_beams` is provided, which just deterministically\n picks the `k` examples with highest log probability.\n \"\"\"\n\n def init_state(\n self, start_class_log_probabilities: torch.Tensor, batch_size: int, num_classes: int\n ) -> StateType:\n del start_class_log_probabilities, batch_size, num_classes\n return {}\n\n @abstractmethod\n def sample_nodes(\n self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType\n ) -> Tuple[torch.Tensor, torch.Tensor, StateType]:\n raise NotImplementedError\n\n def sample_beams(\n self, log_probs: torch.Tensor, beam_size: int, state: StateType\n ) -> Tuple[torch.Tensor, torch.Tensor, StateType]:\n del state\n selected_log_probs, selected_indices = torch.topk(log_probs, beam_size, dim=-1)\n return selected_log_probs, selected_indices, {}"
},
{
"identifier": "ActivationType",
"path": "olmo/config.py",
"snippet": "class ActivationType(StrEnum):\n gelu = \"gelu\"\n relu = \"relu\"\n swiglu = \"swiglu\""
},
{
"identifier": "BlockType",
"path": "olmo/config.py",
"snippet": "class BlockType(StrEnum):\n sequential = \"sequential\"\n parallel = \"parallel\""
},
{
"identifier": "LayerNormType",
"path": "olmo/config.py",
"snippet": "class LayerNormType(StrEnum):\n default = \"default\"\n \"\"\"\n The default LayerNorm implementation, equivalent to PyTorch's built-in version.\n \"\"\"\n\n low_precision = \"low_precision\"\n \"\"\"\n A low-precision version of the default LayerNorm.\n \"\"\"\n\n rms = \"rms\"\n \"\"\"\n An RMSNorm implementation. When using ``torch.compile`` this is\n probably the fastest implementation.\n \"\"\"\n\n low_precision_rms = \"low_precision_rms\"\n \"\"\"\n A low-precision version of RMSNorm.\n \"\"\""
},
{
"identifier": "ModelConfig",
"path": "olmo/config.py",
"snippet": "class ModelConfig(BaseConfig):\n \"\"\"\n OLMo (model) configuration.\n \"\"\"\n\n # Note that the defaults for these attributes are equivalent to the base GPT2 model.\n\n d_model: int = 768\n \"\"\"\n The hidden size of the model.\n \"\"\"\n\n n_heads: int = 12\n \"\"\"\n The number of self-attention heads.\n \"\"\"\n\n n_layers: int = 12\n \"\"\"\n The number of layers/blocks.\n \"\"\"\n\n mlp_ratio: int = 4\n \"\"\"\n The ratio of the inner MLP dimensionality to ``d_model``.\n \"\"\"\n\n activation_type: ActivationType = ActivationType.swiglu\n \"\"\"\n The activation function to use within the MLP layers.\n \"\"\"\n\n block_type: BlockType = BlockType.sequential\n \"\"\"\n The transformer block implementation.\n \"\"\"\n\n alibi: bool = False\n \"\"\"\n If ``True``, use ALiBi embeddings. Mutually exclusive with ``rope``.\n \"\"\"\n\n alibi_bias_max: float = 8.0\n \"\"\"\n Maximum absolute value of ALiBi bias.\n \"\"\"\n\n rope: bool = False\n \"\"\"\n Use rotary positional embeddings (RoPE). Mutually exclusive with ``alibi``.\n \"\"\"\n\n flash_attention: bool = False\n \"\"\"\n If ``True``, use ``FlashAttention``.\n \"\"\"\n\n attention_dropout: float = 0.1\n \"\"\"\n The dropout probability within the attention modules.\n \"\"\"\n\n multi_query_attention: bool = False\n \"\"\"\n Use the Multi-Query formulation of attention used in PaLM. This reduces the number of parameters\n and is more efficient during inference.\n \"\"\"\n\n attention_layer_norm: bool = False\n \"\"\"\n Apply layer norm to the keys and queries within the attention mechanism.\n This can help stabilize training.\n \"\"\"\n\n residual_dropout: float = 0.1\n \"\"\"\n The dropout probability for the MLP and attention output within each block.\n \"\"\"\n\n embedding_dropout: float = 0.1\n \"\"\"\n The dropout probability for embeddings.\n \"\"\"\n\n layer_norm_type: LayerNormType = LayerNormType.default\n \"\"\"\n The layernorm implementation to use.\n \"\"\"\n\n max_sequence_length: int = 1024\n \"\"\"\n The maximum input sequence length supported by the model.\n \"\"\"\n\n include_bias: bool = True\n \"\"\"\n Whether or not to include bias parameters in linear layers.\n In PaLM, they got rid of all bias terms because they found that large\n models tend to have near 0 bias terms anyway.\n \"\"\"\n\n vocab_size: int = 50257\n \"\"\"\n Vocabulary size of the model.\n \"\"\"\n\n embedding_size: Optional[int] = 50304\n \"\"\"\n The number of embeddings, i.e. the number of tokens. If set to ``None`` it will default\n to ``vocab_size``. If ``vocab_size`` is not a multiple of 128, setting this to the\n next multiple of 128 that's greater than ``vocab_size`` can improve throughput\n substantially.\n \"\"\"\n\n eos_token_id: int = 50256\n \"\"\"\n The ID of the end-of-sentence special token.\n \"\"\"\n\n pad_token_id: int = 50256\n \"\"\"\n The ID of the token to use for padding. Defaults to the ID of the EOS token.\n \"\"\"\n\n init_device: Optional[str] = None\n \"\"\"\n The torch device to use when initializing the model parameters, e.g. \"cpu\", \"cuda:0\", \"meta\".\n \"\"\"\n\n init_std: float = 0.02\n \"\"\"\n Standard deviation used when initializing parameters.\n \"\"\"\n\n precision: Optional[str] = None\n \"\"\"\n Precision used to train/evaluate with. You shouldn't set this directly.\n See :data:`TrainConfig.precision` instead.\n \"\"\""
},
{
"identifier": "OlmoConfigurationError",
"path": "olmo/exceptions.py",
"snippet": "class OlmoConfigurationError(OlmoError):\n \"\"\"\n An error with a configuration file.\n \"\"\""
}
] | import math
import os
import torch
import torch.backends.cuda
import torch.nn as nn
import torch.nn.functional as F
import warnings
from abc import abstractmethod
from typing import Dict, List, NamedTuple, Optional, Sequence, Tuple, cast
from torch import einsum
from .aliases import PathOrStr
from .beam_search import BeamSearch, Constraint, FinalSequenceScorer, Sampler
from .config import ActivationType, BlockType, LayerNormType, ModelConfig
from .exceptions import OlmoConfigurationError
from functools import partial
from cached_path import cached_path | 10,784 | self.k_norm: Optional[LayerNormBase] = None
self.q_norm: Optional[LayerNormBase] = None
if config.attention_layer_norm:
self.k_norm = LayerNormBase.build(
config, size=config.d_model // config.n_heads if config.multi_query_attention else None
)
self.q_norm = LayerNormBase.build(config)
# Activation function.
self.act = Activation.build(config)
assert (self.act.output_multiplier * config.mlp_ratio * config.d_model) % 1 == 0
# Attention output projection.
self.attn_out = nn.Linear(
config.d_model, config.d_model, bias=config.include_bias, device=config.init_device
)
# Feed-forward output projection.
self.ff_out = nn.Linear(
int(self.act.output_multiplier * config.mlp_ratio * config.d_model),
config.d_model,
bias=config.include_bias,
device=config.init_device,
)
self.ff_out._is_residual = True # type: ignore
# Rotary embeddings.
if self.config.rope:
self.rotary_emb = RotaryEmbedding(config)
self.register_buffer(
"pos_emb", self.rotary_emb(config.max_sequence_length, device=config.init_device), persistent=False
)
def get_rotary_embedding(self, seq_len: int, device: Optional[torch.device]) -> torch.Tensor:
if self.pos_emb is not None and self.pos_emb.shape[-2] >= seq_len: # type: ignore
return self.pos_emb[:seq_len] # type: ignore
pos_emb = self.rotary_emb(seq_len, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def attention(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
attention_bias: Optional[torch.Tensor] = None,
layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
B, T, C = q.size() # batch size, sequence length, d_model
dtype = k.dtype
# Optionally apply layer norm to keys and queries.
if self.q_norm is not None and self.k_norm is not None:
q = self.q_norm(q).to(dtype=dtype)
k = self.k_norm(k).to(dtype=dtype)
# Move head forward to be next to the batch dim.
# shape: (B, nh, T, hs)
q = q.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
if self.config.multi_query_attention:
# shape: (B, 1, T, hs)
k = k.view(B, T, 1, C // self.config.n_heads).transpose(1, 2)
# shape: (B, 1, T, hs)
v = v.view(B, T, 1, C // self.config.n_heads).transpose(1, 2)
else:
# shape: (B, nh, T, hs)
k = k.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
# shape: (B, nh, T, hs)
v = v.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
if layer_past is not None:
past_key, past_value = layer_past
k = torch.cat((past_key, k), dim=-2)
v = torch.cat((past_value, v), dim=-2)
if use_cache:
present = (k, v)
else:
present = None
query_len, key_len = q.shape[-2], k.shape[-2] # could be different if layer_past not None
if self.config.rope:
# Apply rotary embeddings.
positions = self.get_rotary_embedding(key_len, q.device)
q = apply_rotary_pos_emb(positions[key_len - query_len : key_len], q)
k = apply_rotary_pos_emb(positions, k)
if attention_bias is not None:
attention_bias = attention_bias[:, :, key_len - query_len : key_len, :key_len]
# Get the attention scores.
# shape: (B, nh, T, hs)
att = F.scaled_dot_product_attention(
q,
k,
v,
attn_mask=None if attention_bias is None else attention_bias.to(dtype=dtype),
dropout_p=0.0 if not self.training else self.config.attention_dropout,
is_causal=attention_bias is None,
)
# Re-assemble all head outputs side-by-side.
att = att.transpose(1, 2).contiguous().view(B, T, C)
# Apply output projection.
return self.attn_out(att), present
@abstractmethod
def forward(
self,
x: torch.Tensor,
attention_bias: Optional[torch.FloatTensor] = None,
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
raise NotImplementedError
@classmethod
def build(cls, config: ModelConfig) -> OlmoBlock:
| """
Adapted from
[MosaicML](https://github.com/mosaicml/examples.git) and
[minGPT](https://github.com/karpathy/minGPT.git)
"""
from __future__ import annotations
__all__ = [
"LayerNormBase",
"LayerNorm",
"RMSLayerNorm",
"RotaryEmbedding",
"Activation",
"GELU",
"ReLU",
"SwiGLU",
"OlmoBlock",
"OlmoSequentialBlock",
"OlmoParallelBlock",
"Olmo",
"OlmoOutput",
"OlmoGenerateOutput",
]
class MatformerManager:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.current_factor = 1
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
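# --- Editor's note: illustrative sketch, not part of the original OLMo file. ---
# MatformerManager above is a minimal singleton: direct construction is disabled and
# get_instance() lazily allocates the one shared object via __new__, then calls
# initialize() by hand. The self-contained re-implementation below exists only to show
# that pattern in isolation; the name "SingletonCounter" is invented for illustration.
class SingletonCounter:
    _instance = None

    def __init__(self):
        raise RuntimeError("Call get_instance() instead")

    def initialize(self):
        self.count = 0

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls.__new__(cls)  # bypass __init__, as MatformerManager does
            cls._instance.initialize()
        return cls._instance

# Every lookup returns the same object, so state set through one handle is visible
# through the other.
a = SingletonCounter.get_instance()
b = SingletonCounter.get_instance()
a.count += 1
assert a is b and b.count == 1
# --- end editor's note ---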
class LayerNormBase(nn.Module):
def __init__(self, config: ModelConfig):
super().__init__()
self.config = config
@abstractmethod
def forward(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
@classmethod
def build(cls, config: ModelConfig, size: Optional[int] = None) -> LayerNormBase:
if config.layer_norm_type == LayerNormType.default:
return LayerNorm(config, size=size, low_precision=False)
elif config.layer_norm_type == LayerNormType.low_precision:
return LayerNorm(config, size=size, low_precision=True)
elif config.layer_norm_type == LayerNormType.rms:
return RMSLayerNorm(config, size=size, low_precision=False)
elif config.layer_norm_type == LayerNormType.low_precision_rms:
return RMSLayerNorm(config, size=size, low_precision=True)
else:
raise NotImplementedError(f"Not sure how to handle '{config.layer_norm_type}' LayerNorm type")
def _cast_if_autocast_enabled(self, tensor: torch.Tensor) -> torch.Tensor:
if torch.is_autocast_enabled():
if tensor.device.type == "cuda":
dtype = torch.get_autocast_gpu_dtype()
elif tensor.device.type == "cpu":
dtype = torch.get_autocast_cpu_dtype()
else:
raise NotImplementedError()
return tensor.to(dtype=dtype)
return tensor
class LayerNorm(LayerNormBase):
"""
The default :class:`LayerNorm` implementation which can optionally run in low precision.
"""
def __init__(self, config: ModelConfig, size: Optional[int] = None, low_precision: bool = False):
super().__init__(config)
self.normalized_shape = (size or config.d_model,)
self.eps = 1e-05
self.weight = nn.Parameter(torch.ones(self.normalized_shape, device=config.init_device))
self.bias = nn.Parameter(torch.zeros(self.normalized_shape, device=config.init_device))
self.low_precision = low_precision
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.low_precision:
module_device = x.device
downcast_x = self._cast_if_autocast_enabled(x)
downcast_weight = (
self._cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
)
downcast_bias = self._cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
with torch.autocast(enabled=False, device_type=module_device.type):
return F.layer_norm(downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps)
else:
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
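# --- Editor's note: illustrative sketch, not part of the original OLMo file. ---
# The low_precision branch above only changes where the cast happens: the input and the
# affine parameters are first moved to the active autocast dtype, and F.layer_norm then
# runs with autocast disabled, so the kernel executes directly in the reduced precision.
# The standalone check below skips the class and autocast machinery and simply compares
# an fp32 layer norm against a bfloat16 one; it assumes the local PyTorch build supports
# bfloat16 layer_norm on CPU.
import torch
import torch.nn.functional as F

x = torch.randn(2, 8)
w, b = torch.ones(8), torch.zeros(8)

y_fp32 = F.layer_norm(x, (8,), w, b, 1e-5)
y_bf16 = F.layer_norm(x.bfloat16(), (8,), w.bfloat16(), b.bfloat16(), 1e-5)

# The reduced-precision result tracks the fp32 one up to bfloat16 rounding error.
print((y_fp32 - y_bf16.float()).abs().max())
# --- end editor's note ---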
class RMSLayerNorm(LayerNorm):
"""
RMS layer norm, a simplified :class:`LayerNorm` implementation that can optionally run
in low-precision.
"""
def __init__(self, config: ModelConfig, size: Optional[int] = None, low_precision: bool = False):
super().__init__(config)
self.eps = 1e-08
self.size = size or config.d_model
self.weight = nn.Parameter(torch.ones(self.config.d_model))
if self.config.include_bias:
self.bias = nn.Parameter(torch.zeros(self.config.d_model))
else:
self.register_parameter("bias", None)
self.low_precision = low_precision
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.low_precision:
module_device = x.device
downcast_x = self._cast_if_autocast_enabled(x)
downcast_weight = self._cast_if_autocast_enabled(self.weight)
downcast_bias = self._cast_if_autocast_enabled(self.bias) if self.config.include_bias else None
with torch.autocast(enabled=False, device_type=module_device.type):
return self.rms_norm(downcast_x, downcast_weight, downcast_bias)
else:
return self.rms_norm(x, self.weight, self.bias if self.config.include_bias else None)
def rms_norm(self, x: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:
norm_x = x.norm(2, dim=-1, keepdim=True)
rms_x = norm_x * self.size ** (-1.0 / 2)
x_normed = x / (rms_x + self.eps)
if bias is not None:
return weight * x_normed + self.bias
else:
return weight * x_normed
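# --- Editor's note: illustrative sketch, not part of the original OLMo file. ---
# rms_norm above computes weight * x / (rms_x + eps), where rms_x = ||x||_2 * d**(-1/2)
# is the root-mean-square over the last dimension; unlike standard LayerNorm it never
# subtracts the mean. The standalone check below verifies that the norm-based expression
# used in the class equals the textbook sqrt(mean(x**2)) form (plain tensors, no
# ModelConfig involved).
import torch

d = 8
x = torch.randn(4, d)
eps = 1e-8
weight = torch.ones(d)

norm_x = x.norm(2, dim=-1, keepdim=True)            # ||x||_2 per row
rms_x = norm_x * d ** (-1.0 / 2)                     # equals sqrt(mean(x ** 2))
as_in_class = weight * (x / (rms_x + eps))

textbook = weight * (x / (torch.sqrt(x.pow(2).mean(dim=-1, keepdim=True)) + eps))

print(torch.allclose(as_in_class, textbook, atol=1e-6))   # True
# --- end editor's note ---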
class RotaryEmbedding(nn.Module):
"""
[Rotary positional embeddings (RoPE)](https://arxiv.org/abs/2104.09864).
"""
def __init__(self, config: ModelConfig):
super().__init__()
dim = config.d_model // config.n_heads
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=config.init_device).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype) # type: ignore
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x: torch.Tensor) -> torch.Tensor:
B, nh, T, hs = x.size()
x = x.view(B, nh, T, 2, hs // 2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
out = (t * pos.cos()) + (rotate_half(t) * pos.sin())
return out.to(t.dtype)
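# --- Editor's note: illustrative sketch, not part of the original OLMo file. ---
# How the three pieces above fit together: RotaryEmbedding.forward builds an angle table
# of shape (seq_len, head_dim), with the per-pair frequencies duplicated across the two
# halves of the last dimension; rotate_half swaps those halves and negates one of them;
# apply_rotary_pos_emb mixes both with cos/sin to rotate every query/key position.
# The walk-through below reuses rotate_half/apply_rotary_pos_emb as defined above; all
# sizes are arbitrary example values.
import torch

B, n_heads, T, head_dim = 2, 4, 5, 16

# Angle table, constructed the same way as RotaryEmbedding.forward.
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
seq = torch.arange(T, dtype=inv_freq.dtype)
freqs = torch.einsum("i , j -> i j", seq, inv_freq)   # (T, head_dim // 2)
pos = torch.cat((freqs, freqs), dim=-1)               # (T, head_dim)

q = torch.randn(B, n_heads, T, head_dim)
k = torch.randn(B, n_heads, T, head_dim)

q_rot = apply_rotary_pos_emb(pos, q)   # same shape as q
k_rot = apply_rotary_pos_emb(pos, k)

# Position 0 has rotation angle 0 (cos = 1, sin = 0), so it passes through unchanged.
print(torch.allclose(q_rot[:, :, 0], q[:, :, 0]))
print(q_rot.shape, k_rot.shape)
# --- end editor's note ---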
class Activation(nn.Module):
def __init__(self, config: ModelConfig):
super().__init__()
self.config = config
@abstractmethod
def forward(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
@property
@abstractmethod
def output_multiplier(self) -> float:
raise NotImplementedError
@classmethod
def build(cls, config: ModelConfig) -> Activation:
if config.activation_type == ActivationType.gelu:
return cast(Activation, GELU(approximate="none"))
elif config.activation_type == ActivationType.relu:
return cast(Activation, ReLU(inplace=False))
elif config.activation_type == ActivationType.swiglu:
return SwiGLU(config)
else:
raise NotImplementedError(f"not sure how to handle activation type '{config.activation_type}'")
class GELU(nn.GELU):
@property
def output_multiplier(self) -> float:
return 1.0
class ReLU(nn.ReLU):
@property
def output_multiplier(self) -> float:
return 1.0
class SwiGLU(Activation):
def forward(self, x: torch.Tensor) -> torch.Tensor:
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
@property
def output_multiplier(self) -> float:
return 0.5
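# --- Editor's note: illustrative sketch, not part of the original OLMo file. ---
# Why output_multiplier is 0.5: the block's up-projection produces mlp_ratio * d_model
# hidden features, SwiGLU splits them into a value half and a gate half, and
# silu(gate) * value keeps only half that width. That is why OlmoBlock sizes ff_out's
# input as int(self.act.output_multiplier * config.mlp_ratio * config.d_model), while
# GELU and ReLU keep a multiplier of 1.0. Standalone check with made-up sizes
# (no ModelConfig needed):
import torch
import torch.nn.functional as F

d_model, mlp_ratio = 8, 4
hidden = torch.randn(2, mlp_ratio * d_model)     # pretend output of the up-projection

value, gate = hidden.chunk(2, dim=-1)            # same split order as SwiGLU.forward
out = F.silu(gate) * value

print(hidden.shape[-1], out.shape[-1])           # 32 -> 16, i.e. a multiplier of 0.5
# --- end editor's note ---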
class OlmoBlock(nn.Module):
"""
A base class for transformer block implementations.
"""
def __init__(self, config: ModelConfig):
super().__init__()
self.config = config
assert config.d_model % config.n_heads == 0
# Dropout.
self.dropout = nn.Dropout(config.residual_dropout)
# Layer norms.
self.k_norm: Optional[LayerNormBase] = None
self.q_norm: Optional[LayerNormBase] = None
if config.attention_layer_norm:
self.k_norm = LayerNormBase.build(
config, size=config.d_model // config.n_heads if config.multi_query_attention else None
)
self.q_norm = LayerNormBase.build(config)
# Activation function.
self.act = Activation.build(config)
assert (self.act.output_multiplier * config.mlp_ratio * config.d_model) % 1 == 0
# Attention output projection.
self.attn_out = nn.Linear(
config.d_model, config.d_model, bias=config.include_bias, device=config.init_device
)
# Feed-forward output projection.
self.ff_out = nn.Linear(
int(self.act.output_multiplier * config.mlp_ratio * config.d_model),
config.d_model,
bias=config.include_bias,
device=config.init_device,
)
self.ff_out._is_residual = True # type: ignore
# Rotary embeddings.
if self.config.rope:
self.rotary_emb = RotaryEmbedding(config)
self.register_buffer(
"pos_emb", self.rotary_emb(config.max_sequence_length, device=config.init_device), persistent=False
)
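# Editor's note (clarifying comment, not part of the original file): the RoPE angle
# table is registered with persistent=False, so it stays out of state_dict() and
# checkpoints; it is deterministic and cheap to rebuild, and get_rotary_embedding
# below re-registers a longer table on the fly if a sequence exceeds the precomputed
# max_sequence_length.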
def get_rotary_embedding(self, seq_len: int, device: Optional[torch.device]) -> torch.Tensor:
if self.pos_emb is not None and self.pos_emb.shape[-2] >= seq_len: # type: ignore
return self.pos_emb[:seq_len] # type: ignore
pos_emb = self.rotary_emb(seq_len, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def attention(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
attention_bias: Optional[torch.Tensor] = None,
layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
B, T, C = q.size() # batch size, sequence length, d_model
dtype = k.dtype
# Optionally apply layer norm to keys and queries.
if self.q_norm is not None and self.k_norm is not None:
q = self.q_norm(q).to(dtype=dtype)
k = self.k_norm(k).to(dtype=dtype)
# Move head forward to be next to the batch dim.
# shape: (B, nh, T, hs)
q = q.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
if self.config.multi_query_attention:
# shape: (B, 1, T, hs)
k = k.view(B, T, 1, C // self.config.n_heads).transpose(1, 2)
# shape: (B, 1, T, hs)
v = v.view(B, T, 1, C // self.config.n_heads).transpose(1, 2)
else:
# shape: (B, nh, T, hs)
k = k.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
# shape: (B, nh, T, hs)
v = v.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
if layer_past is not None:
past_key, past_value = layer_past
k = torch.cat((past_key, k), dim=-2)
v = torch.cat((past_value, v), dim=-2)
if use_cache:
present = (k, v)
else:
present = None
query_len, key_len = q.shape[-2], k.shape[-2] # could be different if layer_past not None
if self.config.rope:
# Apply rotary embeddings.
positions = self.get_rotary_embedding(key_len, q.device)
q = apply_rotary_pos_emb(positions[key_len - query_len : key_len], q)
k = apply_rotary_pos_emb(positions, k)
if attention_bias is not None:
attention_bias = attention_bias[:, :, key_len - query_len : key_len, :key_len]
# Get the attention scores.
# shape: (B, nh, T, hs)
att = F.scaled_dot_product_attention(
q,
k,
v,
attn_mask=None if attention_bias is None else attention_bias.to(dtype=dtype),
dropout_p=0.0 if not self.training else self.config.attention_dropout,
is_causal=attention_bias is None,
)
# Re-assemble all head outputs side-by-side.
att = att.transpose(1, 2).contiguous().view(B, T, C)
# Apply output projection.
return self.attn_out(att), present
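# --- Editor's note: illustrative sketch, not part of the original OLMo file. ---
# Two mechanics the attention() method above relies on, shown with bare tensors:
# (1) with multi_query_attention the keys/values keep a single head of shape
#     (B, 1, T, hs) that broadcasts against every query head, and
# (2) the incremental KV cache (layer_past) is plain concatenation along the sequence
#     dimension, which is why key_len can exceed query_len and the rotary/bias tables
#     are sliced to the trailing query_len positions.
# Sizes are arbitrary; the real code passes these shapes to F.scaled_dot_product_attention,
# whereas the math is written out here so the broadcasting stays visible.
import torch

B, n_heads, hs = 2, 4, 16
past_len, new_len = 6, 1                      # decoding a single new token

q = torch.randn(B, n_heads, new_len, hs)      # queries exist only for the new token
k_new = torch.randn(B, 1, new_len, hs)        # multi-query: one shared K/V head
v_new = torch.randn(B, 1, new_len, hs)
past_k = torch.randn(B, 1, past_len, hs)      # cache carried over from earlier steps
past_v = torch.randn(B, 1, past_len, hs)

k = torch.cat((past_k, k_new), dim=-2)        # (B, 1, past_len + new_len, hs)
v = torch.cat((past_v, v_new), dim=-2)

# No mask is needed here: the single new position may attend to the whole cache.
scores = (q @ k.transpose(-2, -1)) / hs ** 0.5   # (B, n_heads, 1, past_len + new_len)
att = scores.softmax(dim=-1) @ v                 # (B, n_heads, 1, hs)
print(att.shape)                                 # torch.Size([2, 4, 1, 16])
# --- end editor's note ---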
@abstractmethod
def forward(
self,
x: torch.Tensor,
attention_bias: Optional[torch.FloatTensor] = None,
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
raise NotImplementedError
@classmethod
def build(cls, config: ModelConfig) -> OlmoBlock: | if config.block_type == BlockType.sequential: | 6 | 2023-11-14 02:24:07+00:00 | 12k |
1in-oos/ccplus | caringcaribou/tests/test_module_uds.py | [
{
"identifier": "Constants",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class Constants(object):\n # NR_SI (Negative Response Service Identifier) is a bit special, since\n # it is not a service per se.\n # From ISO-14229-1 specification: \"The NR_SI value is co-ordinated with\n # the SI values. The NR_SI value is not used as a SI value in order to\n # make A_Data coding and decoding easier.\"\n NR_SI = 0x7F"
},
{
"identifier": "Iso14229_1",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class Iso14229_1(object):\n P3_CLIENT = 5\n\n def __init__(self, tp):\n self.tp = tp\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n @staticmethod\n def get_service_response_id(request_id):\n \"\"\"\n Returns the service response ID for the given request ID\n\n :param request_id: Request service ID\n :return: Corresponding response service ID\n \"\"\"\n return request_id + 0x40\n\n @staticmethod\n def get_service_request_id(response_id):\n \"\"\"\n Returns the service request ID for the given response ID\n\n :param response_id: Response service ID\n :return: Corresponding request service ID\n \"\"\"\n return response_id - 0x40\n\n def send_request(self, data):\n \"\"\"\n Sends a request message containing 'data' through the underlying\n TP layer\n\n :param data: The data to send\n :return: None\n \"\"\"\n return self.tp.send_request(data)\n\n def send_response(self, data):\n \"\"\"\n Sends a response message containing 'data' through the underlying\n TP layer\n\n :param data: The data to send\n :return: None\n \"\"\"\n return self.tp.send_response(data)\n\n def receive_response(self, wait_window):\n \"\"\"\n Attempts to receive a response through the underlying TP layer\n\n :param wait_window: Minimum time (in seconds) to wait before timeout\n :return: The received response if successful,\n None otherwise\n \"\"\"\n start_time = time.process_time()\n while True:\n current_time = time.process_time()\n if (current_time - start_time) > wait_window:\n return None\n\n response = self.tp.indication(wait_window)\n NRC = NegativeResponseCodes\n NRC_RCRRP = NRC.REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING\n if response is not None and len(response) >= 3:\n if (response[0] == Constants.NR_SI and\n response[2] == NRC_RCRRP):\n continue\n break\n return response\n\n @staticmethod\n def is_positive_response(response):\n \"\"\"\n Returns a bool indicating whether 'response' is positive\n\n :param response: ISO-14229-1 response data\n :return: False if response is a NEGATIVE_RESPONSE,\n True otherwise\n \"\"\"\n if (response is not None and\n len(response) > 0 and\n response[0] != Constants.NR_SI):\n return True\n return False\n\n def read_data_by_identifier(self, identifier):\n \"\"\"\n Sends a \"read data by identifier\" request for 'identifier'\n\n :param identifier: Data identifier\n :return: Response data if successful,\n None otherwise\n \"\"\"\n response = []\n num_dids = len(identifier)\n if num_dids > 0:\n request = [0] * ((num_dids * 2) + 1)\n request[0] = ServiceID.READ_DATA_BY_IDENTIFIER\n for i in range(0, num_dids):\n request[i * 2 + 1] = (identifier[i] >> 8) & 0xFF\n request[i * 2 + 2] = identifier[i] & 0xFF\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n return response\n\n def read_memory_by_address(self, address_and_length_format,\n memory_address, memory_size):\n \"\"\"\n Sends a \"read memory by address\" request for 'memory_address'\n\n :param address_and_length_format: Address and length format\n :param memory_address: Memory address\n :param memory_size: Memory size\n :return: Response data if successful,\n None otherwise\n \"\"\"\n addr_sz_fmt = (address_and_length_format >> 4) & 0xF\n data_sz_fmt = (address_and_length_format & 0xF)\n\n request = [0] * (1 + 1 + addr_sz_fmt + data_sz_fmt)\n request[0] = ServiceID.READ_MEMORY_BY_ADDRESS\n request[1] = address_and_length_format\n offset = 2\n for i in (range(0, addr_sz_fmt)):\n request[addr_sz_fmt + offset - i - 1] = (memory_address & 
0xFF)\n memory_address = (memory_address >> 8)\n\n offset += addr_sz_fmt\n\n for i in (range(0, data_sz_fmt)):\n request[data_sz_fmt + offset - i - 1] = (memory_size & 0xFF)\n memory_size = (memory_size >> 8)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def write_memory_by_address(self, address_and_length_format,\n memory_address, memory_size, data):\n \"\"\"\n Sends a \"write memory by address\" request to write 'data' to\n 'memory_address'\n\n :param address_and_length_format: Address and length format\n :param memory_address: Memory address\n :param memory_size: Memory size\n :param data: The data to write to 'memory_address'\n :return: Response data if successful,\n None otherwise\n \"\"\"\n addr_sz_fmt = (address_and_length_format >> 4) & 0xF\n data_sz_fmt = (address_and_length_format & 0xF)\n\n request = [0] * (1 + 1 + addr_sz_fmt + data_sz_fmt)\n request[0] = ServiceID.WRITE_MEMORY_BY_ADDRESS\n request[1] = address_and_length_format\n offset = 2\n for i in (range(0, addr_sz_fmt)):\n request[addr_sz_fmt + offset - i - 1] = (memory_address & 0xFF)\n memory_address = (memory_address >> 8)\n\n offset += addr_sz_fmt\n\n for i in (range(0, data_sz_fmt)):\n request[data_sz_fmt + offset - i - 1] = (memory_size & 0xFF)\n memory_size = (memory_size >> 8)\n\n request += data\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def write_data_by_identifier(self, identifier, data):\n \"\"\"\n Sends a \"write data by identifier\" request to write 'data' to\n 'identifier'\n\n :param identifier: Data identifier\n :param data: Data to write to 'identifier'\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * (1 + 2)\n\n request[0] = ServiceID.WRITE_DATA_BY_IDENTIFIER\n request[1] = (identifier >> 8) & 0xFF\n request[2] = identifier & 0xFF\n request += data\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def input_output_control_by_identifier(self, identifier, data):\n \"\"\"\n Sends a \"input output control by identifier\" request for 'data' to\n 'identifier'\n\n :param identifier: Data identifier\n :param data: Data\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * (1 + 2)\n\n request[0] = ServiceID.INPUT_OUTPUT_CONTROL_BY_IDENTIFIER\n request[1] = (identifier >> 8) & 0xFF\n request[2] = identifier & 0xFF\n request += data\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def dynamically_define_data_identifier(self, identifier,\n sub_function, sub_function_arg):\n \"\"\"\n Sends a \"dynamically define data identifier\" request for\n 'identifier'\n\n :param identifier: DDDID to set\n :param sub_function: Sub function\n :param sub_function_arg: Sub function arguments\n :return: Response data if successful,\n None otherwise\n \"\"\"\n if (identifier is None or\n sub_function is None or\n sub_function_arg is None):\n return None\n\n request = [0] * (1 + 1 + 2 + len(sub_function_arg) * 4)\n request[0] = ServiceID.DYNAMICALLY_DEFINE_DATA_IDENTIFIER\n request[1] = sub_function\n request[2] = (identifier >> 8) & 0xFF\n request[3] = identifier & 0xFF\n\n offset = 4\n for did in sub_function_arg:\n request[offset + 0] = (did.sourceDataIdentifier >> 8) & 0xFF\n request[offset + 1] = did.sourceDataIdentifier & 0xFF\n request[offset + 2] = did.positionInSourceDataRecord\n request[offset + 3] = did.memorySize\n 
offset += 4\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def diagnostic_session_control(self, session_type):\n \"\"\"\n Sends a \"DiagnosticSessionControl\" request for specified session\n type\n\n :param session_type: Indicates which kind of session should be\n requested\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * 2\n request[0] = ServiceID.DIAGNOSTIC_SESSION_CONTROL\n request[1] = session_type\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def ecu_reset(self, reset_type):\n \"\"\"\n Sends an \"ECU reset\" request for specified reset type\n\n :param reset_type: Indicates which kind of reset should be requested\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * 2\n request[0] = ServiceID.ECU_RESET\n request[1] = reset_type\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def security_access_request_seed(self, level, data_record=None):\n \"\"\"\n Sends a Security Access \"Request seed\" message for 'level'\n\n :param level: Security Access Type level to send request seed for\n :param data_record: Optional data to transmit when requesting seed,\n e.g. client identification\n :return: Response data (containing seed) if successful,\n None otherwise\n \"\"\"\n service_id = ServiceID.SECURITY_ACCESS\n request = [service_id, level]\n if data_record:\n for data_record in data_record:\n request.append(data_record)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def security_access_send_key(self, level, key):\n \"\"\"\n Sends a Security Access \"Send key\" message with 'key' for 'level'\n\n :param level: Security Access Type level to send key for\n :param key: Key to transmit\n :return: Response data if successful,\n None otherwise\n \"\"\"\n service_id = ServiceID.SECURITY_ACCESS\n request = [service_id, level]\n for key_byte in key:\n request.append(key_byte)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def read_data_by_periodic_identifier(self, transmission_mode,\n identifier):\n \"\"\"\n Sends a \"read data by periodic identifier\" request for 'identifier'\n\n :param transmission_mode: Transmission mode\n :param identifier: Identifier\n :return: Response data if successful,\n None otherwise\n \"\"\"\n if (transmission_mode is None or\n identifier is None or\n len(identifier) == 0):\n return None\n\n request = [0] * (2 + len(identifier))\n request[0] = ServiceID.READ_DATA_BY_PERIODIC_IDENTIFIER\n request[1] = transmission_mode\n\n for i in range(0, len(identifier)):\n request[2 + i] = identifier[i]\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response"
},
{
"identifier": "NegativeResponseCodes",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class NegativeResponseCodes(object):\n \"\"\"\n ISO-14229-1 negative response codes\n \"\"\"\n POSITIVE_RESPONSE = 0x00\n # 0x01-0x0F ISO SAE Reserved\n GENERAL_REJECT = 0x10\n SERVICE_NOT_SUPPORTED = 0x11\n SUB_FUNCTION_NOT_SUPPORTED = 0x12\n INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT = 0x13\n RESPONSE_TOO_LONG = 0x14\n # 0x15-0x20 ISO SAE Reserved\n BUSY_REPEAT_REQUEST = 0x21\n CONDITIONS_NOT_CORRECT = 0x22\n # 0x23 ISO SAE Reserved\n REQUEST_SEQUENCE_ERROR = 0x24\n NO_RESPONSE_FROM_SUBNET_COMPONENT = 0x25\n FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION = 0x26\n # 0x27-0x30 ISO SAE Reserved\n REQUEST_OUT_OF_RANGE = 0x31\n # 0x32 ISO SAE Reserved\n SECURITY_ACCESS_DENIED = 0x33\n # 0x34 ISO SAE Reserved\n INVALID_KEY = 0x35\n EXCEEDED_NUMBER_OF_ATTEMPTS = 0x36\n REQUIRED_TIME_DELAY_NOT_EXPIRED = 0x37\n # 0x38-0x4F Reserved by extended data link security document\n # 0x50-0x6F ISO SAE Reserved\n UPLOAD_DOWNLOAD_NOT_ACCEPTED = 0x70\n TRANSFER_DATA_SUSPENDED = 0x71\n GENERAL_PROGRAMMING_FAILURE = 0x72\n WRONG_BLOCK_SEQUENCE_COUNTER = 0x73\n # 0x74-0x77 ISO SAE Reserved\n REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING = 0x78\n # 0x79-0x7D ISO SAE Reserved\n SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7E\n SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7F\n # 0x80 ISO SAE Reserved\n RPM_TOO_HIGH = 0x81\n RPM_TOO_LOW = 0x82\n ENGINE_IS_RUNNING = 0x83\n ENGINE_IS_NOT_RUNNING = 0x84\n ENGINE_RUN_TIME_TOO_LOW = 0x85\n TEMPERATURE_TOO_HIGH = 0x86\n TEMPERATURE_TOO_LOW = 0x87\n VEHICLE_SPEED_TOO_HIGH = 0x88\n VEHICLE_SPEED_TOO_LOW = 0x89\n THROTTLE_PEDAL_TOO_HIGH = 0x8A\n THROTTLE_PEDAL_TOO_LOW = 0x8B\n TRANSMISSION_RANGE_NOT_IN_NEUTRAL = 0x8C\n TRANSMISSION_RANGE_NOT_IN_GEAR = 0x8D\n # 0x8E ISO SAE Reserved\n BRAKE_SWITCHES_NOT_CLOSED = 0x8F\n SHIFT_LEVER_NOT_IN_PARK = 0x90\n TORQUE_CONVERTER_CLUTCH_LOCKED = 0x91\n VOLTAGE_TOO_HIGH = 0x92\n VOLTAGE_TOO_LOW = 0x93\n # 0x94-0xEF Reserved for specific conditions not correct\n # 0xF0-0xFE Vehicle manufacturer specific conditions not correct\n # 0xFF ISO SAE Reserved"
},
{
"identifier": "ServiceID",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class ServiceID(object):\n \"\"\"\n ISO-14229-1 service ID definitions\n \"\"\"\n DIAGNOSTIC_SESSION_CONTROL = 0x10\n ECU_RESET = 0x11\n CLEAR_DIAGNOSTIC_INFORMATION = 0x14\n READ_DTC_INFORMATION = 0x19\n READ_DATA_BY_IDENTIFIER = 0x22\n READ_MEMORY_BY_ADDRESS = 0x23\n READ_SCALING_DATA_BY_IDENTIFIER = 0x24\n SECURITY_ACCESS = 0x27\n COMMUNICATION_CONTROL = 0x28\n READ_DATA_BY_PERIODIC_IDENTIFIER = 0x2A\n DYNAMICALLY_DEFINE_DATA_IDENTIFIER = 0x2C\n WRITE_DATA_BY_IDENTIFIER = 0x2E\n INPUT_OUTPUT_CONTROL_BY_IDENTIFIER = 0x2F\n ROUTINE_CONTROL = 0x31\n REQUEST_DOWNLOAD = 0x34\n REQUEST_UPLOAD = 0x35\n TRANSFER_DATA = 0x36\n REQUEST_TRANSFER_EXIT = 0x37\n REQUEST_FILE_TRANSFER = 0x38\n WRITE_MEMORY_BY_ADDRESS = 0x3D\n TESTER_PRESENT = 0x3E\n ACCESS_TIMING_PARAMETER = 0x83\n SECURED_DATA_TRANSMISSION = 0x84\n CONTROL_DTC_SETTING = 0x85\n RESPONSE_ON_EVENT = 0x86\n LINK_CONTROL = 0x87"
},
{
"identifier": "Services",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class Services(object):\n \"\"\"Class structure containing service specific constants, sub-function\n parameters and functions\"\"\"\n\n class DiagnosticSessionControl(BaseService):\n\n service_id = ServiceID.DIAGNOSTIC_SESSION_CONTROL\n\n class DiagnosticSessionType(object):\n # 0x00 ISO SAE Reserved\n DEFAULT_SESSION = 0x01\n PROGRAMMING_SESSION = 0x02\n EXTENDED_DIAGNOSTIC_SESSION = 0x03\n SAFETY_SYSTEM_DIAGNOSTIC_SESSION = 0x04\n # 0x05-0x3F ISO SAE Reserved\n # 0x40-0x5F Vehicle manufacturer specific\n VEHICLE_MANUFACTURER_SESSION_MIN = 0x40\n VEHICLE_MANUFACTURER_SESSION_MAX = 0x5F\n # 0x60-0x7E System supplier specific\n SYSTEM_SUPPLIER_SESSION_MIN = 0x60\n SYSTEM_SUPPLIER_SESSION_MAX = 0x7E\n # 0x7F ISO SAE Reserved\n\n class EcuReset(BaseService):\n\n service_id = ServiceID.ECU_RESET\n\n class ResetType(object):\n # 0x00 ISO SAE Reserved\n HARD_RESET = 0x01\n KEY_OFF_ON_RESET = 0x02\n SOFT_RESET = 0x03\n ENABLE_RAPID_POWER_SHUTDOWN = 0x04\n DISABLE_RAPID_POWER_SHUTDOWN = 0x05\n # 0x06-0x3F ISO SAE Reserved\n # 0x40-0x5F Vehicle manufacturer specific\n # 0x60-0x7E System supplier specific\n # 0x7F ISO SAE Reserved\n\n class SecurityAccess(BaseService):\n\n service_id = ServiceID.SECURITY_ACCESS\n\n class RequestSeedOrSendKey(object):\n \"\"\"\n These are lined up so that value X \"request seed level N\" has\n a matching \"send key level N\" at value X+1.\n\n 0x01 is Request seed level 0x01\n 0x02 is Send key level 0x01\n 0x03 is Request seed level 0x02\n 0x04 is Send key level 0x02\n (...)\n 0x41 is Request seed level 0x21\n 0x42 is Send key level 0x21\n\n The security levels numbering is arbitrary and does not imply\n any relationship between the levels.\n \"\"\"\n\n # 0x00 ISO SAE Reserved\n # 0x01-0x42 Vehicle manufacturer specific request\n # seed/send key pairs\n # 0x43-0X5E ISO SAE Reserved\n ISO_26021_2_VALUES = 0x5F\n ISO_26021_2_SEND_KEY = 0x60\n # 0x61-0x7E System supplier specific\n # 0x7F ISO SAE Reserved\n\n __REQUEST_SEED_MIN = 0x01\n __REQUEST_SEED_MAX = 0x41\n __SEND_KEY_MIN = 0x02\n __SEND_KEY_MAX = 0x42\n\n def is_valid_request_seed_level(self, sub_function):\n \"\"\"Returns True if 'sub_function' is a valid request seed\n value and False otherwise\"\"\"\n value = sub_function & 0x7F\n valid_interval = (self.__REQUEST_SEED_MIN\n <= value <= self.__REQUEST_SEED_MAX)\n is_odd = value % 2 == 1\n return valid_interval and is_odd\n\n def is_valid_send_key_level(self, sub_function):\n \"\"\"Returns True if 'sub_function' is a valid send key value\n and False otherwise\"\"\"\n value = sub_function & 0x7F\n valid_interval = (self.__SEND_KEY_MIN\n <= value <= self.__SEND_KEY_MAX)\n is_even = value % 2 == 0\n return valid_interval and is_even\n\n @staticmethod\n def get_send_key_for_request_seed(seed):\n return seed + 1\n\n class TesterPresent(BaseService):\n\n service_id = ServiceID.TESTER_PRESENT"
},
{
"identifier": "MockEcuIso14229",
"path": "caringcaribou/tests/mock/mock_ecu_uds.py",
"snippet": "class MockEcuIso14229(MockEcuIsoTp, MockEcu):\n \"\"\"ISO-14229-1 (Unified Diagnostic Services) mock ECU handler\"\"\"\n\n IDENTIFIER_REQUEST_POSITIVE = 0x01\n IDENTIFIER_REQUEST_POSITIVE_RESPONSE = 0x72\n IDENTIFIER_REQUEST_NEGATIVE = 0x02\n\n REQUEST_IDENTIFIER_VALID = 0xA001\n REQUEST_IDENTIFIER_INVALID = 0xA002\n REQUEST_VALUE = [0xC0, 0xFF, 0xEE]\n\n REQUEST_ADDRESS_LENGTH_AND_FORMAT = 0x22\n REQUEST_ADDRESS = 0x0001\n REQUEST_DATA_SIZE = 0x10\n DATA = list(range(0x14))\n\n # TODO Use dynamic seed value and verify keys using a simple algorithm\n SECURITY_ACCESS_SEED = [0x36, 0x57]\n SECURITY_ACCESS_KEY = [0xC9, 0xA9]\n\n def __init__(self, arb_id_request, arb_id_response, bus=None):\n MockEcu.__init__(self, bus)\n self.ARBITRATION_ID_ISO_14229_REQUEST = arb_id_request\n self.ARBITRATION_ID_ISO_14229_RESPONSE = arb_id_response\n # Set CAN filter to only listen to incoming requests on the correct arbitration ID\n arbitration_id_filter = [{\"can_id\": arb_id_request, \"can_mask\": 0x1fffffff}]\n self.bus.set_filters(arbitration_id_filter)\n # Setup ISO-TP using the filtered bus\n self.iso_tp = IsoTp(arb_id_request=self.ARBITRATION_ID_ISO_14229_REQUEST,\n arb_id_response=self.ARBITRATION_ID_ISO_14229_RESPONSE,\n bus=self.bus)\n # Setup diagnostics on top of ISO-TP\n self.diagnostics = Iso14229_1(tp=self.iso_tp)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n MockEcuIsoTp.__exit__(self, None, None, None)\n\n @staticmethod\n def create_positive_response(request_service_id, response_data=None):\n \"\"\"\n Returns data for a positive response of 'request_service_id' with an optional 'response_data' payload\n\n :param request_service_id: Service ID (SIDRQ) of the incoming request\n :param response_data: List of data bytes to transmit in the response\n :return: List of bytes to be sent as data payload in the response\n \"\"\"\n # Positive response uses a response service ID (SIDPR) based on the request service ID (SIDRQ)\n service_response_id = Iso14229_1.get_service_response_id(request_service_id)\n response = [service_response_id]\n # Append payload\n if response_data is not None:\n response += response_data\n return response\n\n @staticmethod\n def create_negative_response(request_service_id, nrc):\n \"\"\"\n Returns data for a negative response of 'request_service_id' with negative response code 'nrc'\n\n :param request_service_id: Service ID (SIDRQ) of the incoming request\n :param nrc: Negative response code (NRC_)\n :return: List of bytes to be sent as data payload in the response\n \"\"\"\n response = [Constants.NR_SI,\n request_service_id,\n nrc]\n return response\n\n def message_handler(self, data):\n \"\"\"\n Logic for responding to incoming messages\n\n :param data: list of data bytes in incoming message\n :return: None\n \"\"\"\n assert isinstance(data, list)\n try:\n service_id = data[0]\n # Handle different services\n if service_id == ServiceID.DIAGNOSTIC_SESSION_CONTROL:\n # 0x10 Diagnostic session control\n response_data = self.handle_diagnostic_session_control(data)\n elif service_id == ServiceID.ECU_RESET:\n # 0x11 ECU reset\n response_data = self.handle_ecu_reset(data)\n elif service_id == ServiceID.READ_DATA_BY_IDENTIFIER:\n # 0x22 Read data by identifier\n response_data = self.handle_read_data_by_identifier(data)\n elif service_id == ServiceID.READ_MEMORY_BY_ADDRESS:\n # 0x23 Read memory by address\n response_data = self.handle_read_memory_by_address(data)\n elif service_id == ServiceID.SECURITY_ACCESS:\n # 0x27 Security access\n response_data = 
self.handle_security_access(data)\n elif service_id == ServiceID.WRITE_DATA_BY_IDENTIFIER:\n # 0x2E Write data by identifier\n response_data = self.handle_write_data_by_identifier(data)\n else:\n # Unsupported service\n response_data = self.handle_unsupported_service(data)\n except IndexError:\n # Parsing failed due to invalid message structure\n response_data = self.handle_service_error(data)\n\n # This check makes it possible to support services where a response should not be sent\n if response_data is not None:\n # Simulate a small delay before responding\n time.sleep(self.DELAY_BEFORE_RESPONSE)\n self.diagnostics.send_response(response_data)\n\n def handle_unsupported_service(self, data):\n \"\"\"Provides a standard response for unmapped services, by responding with NRC Service Not Supported\"\"\"\n service_id = data[0]\n nrc = NegativeResponseCodes.SERVICE_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_service_error(self, data):\n \"\"\"Provides a standard response for failed service requests\"\"\"\n service_id = data[0]\n nrc = NegativeResponseCodes.INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_diagnostic_session_control(self, data):\n \"\"\"Evaluates a diagnostic session control request and returns a response\"\"\"\n service_id = data[0]\n # TODO Handle different values?\n session_type = data[1]\n response_data = self.create_positive_response(service_id)\n return response_data\n\n def handle_read_data_by_identifier(self, data):\n \"\"\"\n Evaluates a read data by identifier request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n request = data[2]\n\n if request == self.IDENTIFIER_REQUEST_POSITIVE:\n # Request for positive response\n # TODO Actually read a parameter from memory\n payload = [self.IDENTIFIER_REQUEST_POSITIVE_RESPONSE]\n response_data = self.create_positive_response(service_id, payload)\n elif request == self.IDENTIFIER_REQUEST_NEGATIVE:\n # Request for negative response - use Conditions Not Correct\n nrc = NegativeResponseCodes.CONDITIONS_NOT_CORRECT\n response_data = self.create_negative_response(service_id, nrc)\n else:\n # Unmatched request - use a general reject response\n nrc = NegativeResponseCodes.GENERAL_REJECT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_write_data_by_identifier(self, data):\n \"\"\"\n Evaluates a write data by identifier request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n\n identifier_start_position = 1\n identifier_length = 2\n identifier = int_from_byte_list(data,\n identifier_start_position,\n identifier_length)\n request_data = data[3:]\n # TODO Actually write data to memory\n if identifier == self.REQUEST_IDENTIFIER_VALID:\n # Request for positive response\n # Standard specifies the response payload to be an echo of the data identifier from the request\n payload = data[identifier_start_position:identifier_start_position + identifier_length]\n response_data = self.create_positive_response(service_id, payload)\n elif identifier == self.REQUEST_IDENTIFIER_INVALID:\n # Request for negative response - use Conditions Not Correct\n nrc = NegativeResponseCodes.CONDITIONS_NOT_CORRECT\n response_data = 
self.create_negative_response(service_id, nrc)\n else:\n # Unmatched request - use a general reject response\n nrc = NegativeResponseCodes.GENERAL_REJECT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_read_memory_by_address(self, data):\n \"\"\"\n Evaluates a read memory by address request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n address_field_size = (data[1] >> 4) & 0xF\n data_length_field_size = (data[1] & 0xF)\n address_start_position = 2\n data_length_start_position = 4\n\n start_address = int_from_byte_list(data, address_start_position, address_field_size)\n data_length = int_from_byte_list(data, data_length_start_position, data_length_field_size)\n end_address = start_address + data_length\n if 0 <= start_address <= end_address <= len(self.DATA):\n memory_data = self.DATA[start_address:end_address]\n response_data = self.create_positive_response(service_id, memory_data)\n else:\n nrc = NegativeResponseCodes.REQUEST_OUT_OF_RANGE\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_ecu_reset(self, data):\n \"\"\"\n Evaluates an ECU reset request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n subfunction = data[1]\n reset_type = subfunction & 0x7F\n suppress_positive_response = subfunction >> 7\n\n reset_types = Services.EcuReset.ResetType\n\n if reset_type in [reset_types.HARD_RESET, reset_types.KEY_OFF_ON_RESET, reset_types.SOFT_RESET]:\n if suppress_positive_response:\n response_data = None\n else:\n response_data = self.create_positive_response(service_id, [reset_type])\n else:\n nrc = NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_security_access(self, data):\n \"\"\"\n Evaluates security access requests (both \"Request seed\" and \"Send key\") and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n subfunction = data[1]\n level = subfunction & 0x7F\n\n service_handler = Services.SecurityAccess.RequestSeedOrSendKey()\n if service_handler.is_valid_request_seed_level(level):\n # Request seed handling\n payload = [level]\n payload.extend(self.SECURITY_ACCESS_SEED)\n response_data = self.create_positive_response(service_id, payload)\n elif service_handler.is_valid_send_key_level(level):\n # Send key handling\n expected_key = self.SECURITY_ACCESS_KEY\n received_key = data[2:]\n if received_key == expected_key:\n # Correct key\n response_data = self.create_positive_response(service_id, [level])\n else:\n # Invalid key\n nrc = NegativeResponseCodes.INVALID_KEY\n response_data = self.create_negative_response(service_id, nrc)\n else:\n # Unsupported subfunction\n nrc = NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data"
},
{
"identifier": "uds",
"path": "caringcaribou/modules/uds.py",
"snippet": "UDS_SERVICE_NAMES = {\n 0x10: \"DIAGNOSTIC_SESSION_CONTROL\",\n 0x11: \"ECU_RESET\",\n 0x14: \"CLEAR_DIAGNOSTIC_INFORMATION\",\n 0x19: \"READ_DTC_INFORMATION\",\n 0x20: \"RETURN_TO_NORMAL\",\n 0x22: \"READ_DATA_BY_IDENTIFIER\",\n 0x23: \"READ_MEMORY_BY_ADDRESS\",\n 0x24: \"READ_SCALING_DATA_BY_IDENTIFIER\",\n 0x27: \"SECURITY_ACCESS\",\n 0x28: \"COMMUNICATION_CONTROL\",\n 0x2A: \"READ_DATA_BY_PERIODIC_IDENTIFIER\",\n 0x2C: \"DYNAMICALLY_DEFINE_DATA_IDENTIFIER\",\n 0x2D: \"DEFINE_PID_BY_MEMORY_ADDRESS\",\n 0x2E: \"WRITE_DATA_BY_IDENTIFIER\",\n 0x2F: \"INPUT_OUTPUT_CONTROL_BY_IDENTIFIER\",\n 0x31: \"ROUTINE_CONTROL\",\n 0x34: \"REQUEST_DOWNLOAD\",\n 0x35: \"REQUEST_UPLOAD\",\n 0x36: \"TRANSFER_DATA\",\n 0x37: \"REQUEST_TRANSFER_EXIT\",\n 0x38: \"REQUEST_FILE_TRANSFER\",\n 0x3D: \"WRITE_MEMORY_BY_ADDRESS\",\n 0x3E: \"TESTER_PRESENT\",\n 0x7F: \"NEGATIVE_RESPONSE\",\n 0x83: \"ACCESS_TIMING_PARAMETER\",\n 0x84: \"SECURED_DATA_TRANSMISSION\",\n 0x85: \"CONTROL_DTC_SETTING\",\n 0x86: \"RESPONSE_ON_EVENT\",\n 0x87: \"LINK_CONTROL\"\n}\nNRC_NAMES = {\n 0x00: \"POSITIVE_RESPONSE\",\n 0x10: \"GENERAL_REJECT\",\n 0x11: \"SERVICE_NOT_SUPPORTED\",\n 0x12: \"SUB_FUNCTION_NOT_SUPPORTED\",\n 0x13: \"INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT\",\n 0x14: \"RESPONSE_TOO_LONG\",\n 0x21: \"BUSY_REPEAT_REQUEST\",\n 0x22: \"CONDITIONS_NOT_CORRECT\",\n 0x24: \"REQUEST_SEQUENCE_ERROR\",\n 0x25: \"NO_RESPONSE_FROM_SUBNET_COMPONENT\",\n 0x26: \"FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION\",\n 0x31: \"REQUEST_OUT_OF_RANGE\",\n 0x33: \"SECURITY_ACCESS_DENIED\",\n 0x35: \"INVALID_KEY\",\n 0x36: \"EXCEEDED_NUMBER_OF_ATTEMPTS\",\n 0x37: \"REQUIRED_TIME_DELAY_NOT_EXPIRED\",\n 0x70: \"UPLOAD_DOWNLOAD_NOT_ACCEPTED\",\n 0x71: \"TRANSFER_DATA_SUSPENDED\",\n 0x72: \"GENERAL_PROGRAMMING_FAILURE\",\n 0x73: \"WRONG_BLOCK_SEQUENCE_COUNTER\",\n 0x78: \"REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING\",\n 0x7E: \"SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION\",\n 0x7F: \"SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION\",\n 0x81: \"RPM_TOO_HIGH\",\n 0x82: \"RPM_TOO_LOW\",\n 0x83: \"ENGINE_IS_RUNNING\",\n 0x84: \"ENGINE_IS_NOT_RUNNING\",\n 0x85: \"ENGINE_RUN_TIME_TOO_LOW\",\n 0x86: \"TEMPERATURE_TOO_HIGH\",\n 0x87: \"TEMPERATURE_TOO_LOW\",\n 0x88: \"VEHICLE_SPEED_TOO_HIGH\",\n 0x89: \"VEHICLE_SPEED_TOO_LOW\",\n 0x8A: \"THROTTLE_PEDAL_TOO_HIGH\",\n 0x8B: \"THROTTLE_PEDAL_TOO_LOW\",\n 0x8C: \"TRANSMISSION_RANGE_NOT_IN_NEUTRAL\",\n 0x8D: \"TRANSMISSION_RANGE_NOT_IN_GEAR\",\n 0x8F: \"BRAKE_SWITCHES_NOT_CLOSED\",\n 0x90: \"SHIFT_LEVER_NOT_IN_PARK\",\n 0x91: \"TORQUE_CONVERTER_CLUTCH_LOCKED\",\n 0x92: \"VOLTAGE_TOO_HIGH\",\n 0x93: \"VOLTAGE_TOO_LOW\"\n}\nDELAY_DISCOVERY = 0.01\nDELAY_TESTER_PRESENT = 0.5\nDELAY_SECSEED_RESET = 0.01\nTIMEOUT_SERVICES = 0.2\nTIMEOUT_SUBSERVICES = 0.02\nVERIFICATION_BACKTRACK = 5\nVERIFICATION_EXTRA_DELAY = 0.5\nBYTE_MIN = 0x00\nBYTE_MAX = 0xFF\nDUMP_DID_MIN = 0x0000\nDUMP_DID_MAX = 0xFFFF\nDUMP_DID_TIMEOUT = 0.2\n E=args.E\n E=args.E\ndef uds_discovery(E, min_id, max_id, blacklist_args, auto_blacklist_duration,\n delay, verify, print_results=True):\n def is_valid_response(message):\ndef __uds_discovery_wrapper(args):\ndef service_discovery(arb_id_request, arb_id_response, timeout,\n min_id=BYTE_MIN, max_id=BYTE_MAX, print_results=True):\ndef __service_discovery_wrapper(args):\ndef sub_discovery(arb_id_request, arb_id_response, diagnostic, service, timeout, print_results=True):\ndef __sub_discovery_wrapper(args):\ndef raw_send(arb_id_request, arb_id_response, service, session_type):\ndef 
tester_present(arb_id_request, delay, duration,\n suppress_positive_response):\ndef __tester_present_wrapper(args):\ndef ecu_reset(arb_id_request, arb_id_response, reset_type, timeout):\ndef __ecu_reset_wrapper(args):\ndef print_negative_response(response):\ndef __security_seed_wrapper(args):\ndef extended_session(arb_id_request, arb_id_response, session_type):\ndef request_seed(arb_id_request, arb_id_response, level,\n data_record, timeout):\ndef send_key(arb_id_request, arb_id_response, level, key, timeout):\ndef __dump_dids_wrapper(args):\ndef __auto_wrapper(args):\ndef dump_dids(arb_id_request, arb_id_response, timeout,\n min_did=DUMP_DID_MIN, max_did=DUMP_DID_MAX, print_results=True):\ndef __parse_args(args):\ndef module_main(arg_list):"
}
] | from caringcaribou.utils.iso14229_1 import Constants, Iso14229_1, NegativeResponseCodes, ServiceID, Services
from caringcaribou.tests.mock.mock_ecu_uds import MockEcuIso14229
from caringcaribou.modules import uds
import unittest | 10,619 | from __future__ import print_function
class UdsModuleTestCase(unittest.TestCase):
ARB_ID_REQUEST = 0x300E
ARB_ID_RESPONSE = 0x300F
# Timeout (in seconds) when waiting for response during bruteforce
BRUTEFORCE_TIMEOUT = 0.01
def setUp(self):
# Initialize mock ECU
self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)
# Remove response delay
self.ecu.DELAY_BEFORE_RESPONSE = 0.0
self.ecu.start_server()
def tearDown(self):
if isinstance(self.ecu, MockEcuIso14229):
self.ecu.__exit__(None, None, None)
def test_uds_discovery(self):
# Discovery arguments
start_arb_id = self.ARB_ID_REQUEST - 5
end_arb_id = self.ARB_ID_REQUEST + 5
blacklist = []
auto_blacklist_duration = 0
timeout = self.BRUTEFORCE_TIMEOUT
verify = True
print_results = False
# Perform UDS discovery
result = uds.uds_discovery(min_id=start_arb_id,
max_id=end_arb_id,
blacklist_args=blacklist,
auto_blacklist_duration=auto_blacklist_duration,
delay=timeout,
verify=verify,
print_results=print_results)
expected_result = [(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)]
self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_uds_discovery_blacklist(self):
# Discovery arguments
start_arb_id = self.ARB_ID_REQUEST - 5
end_arb_id = self.ARB_ID_REQUEST + 5
# Blacklist the arbitration ID used for response
blacklist = [self.ARB_ID_RESPONSE]
auto_blacklist_duration = 0
timeout = self.BRUTEFORCE_TIMEOUT
verify = True
print_results = False
# Perform UDS discovery
result = uds.uds_discovery(min_id=start_arb_id,
max_id=end_arb_id,
blacklist_args=blacklist,
auto_blacklist_duration=auto_blacklist_duration,
delay=timeout,
verify=verify,
print_results=print_results)
# No results expected due to blacklist
expected_result = []
self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_service_discovery(self):
# Service discovery arguments
range_start = 0x09
range_end = 0x13
print_results = False
# Perform service discovery
result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST,
arb_id_response=self.ARB_ID_RESPONSE,
timeout=self.BRUTEFORCE_TIMEOUT,
min_id=range_start,
max_id=range_end,
print_results=print_results)
# Supported services within specified range
expected_result = [ServiceID.DIAGNOSTIC_SESSION_CONTROL, ServiceID.ECU_RESET]
self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_service_discovery_empty_range(self):
# Service discovery arguments
range_start = 0x00
range_end = 0x05
print_results = False
# Perform service discovery
result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST,
arb_id_response=self.ARB_ID_RESPONSE,
timeout=self.BRUTEFORCE_TIMEOUT,
min_id=range_start,
max_id=range_end,
print_results=print_results)
# No services should be found within range
expected_result = []
self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected no hits".format(
result))
def test_ecu_reset_hard_reset_success(self):
# ECU Reset arguments
| from __future__ import print_function
class UdsModuleTestCase(unittest.TestCase):
ARB_ID_REQUEST = 0x300E
ARB_ID_RESPONSE = 0x300F
# Timeout (in seconds) when waiting for response during bruteforce
BRUTEFORCE_TIMEOUT = 0.01
def setUp(self):
# Initialize mock ECU
self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)
# Remove response delay
self.ecu.DELAY_BEFORE_RESPONSE = 0.0
self.ecu.start_server()
def tearDown(self):
if isinstance(self.ecu, MockEcuIso14229):
self.ecu.__exit__(None, None, None)
def test_uds_discovery(self):
# Discovery arguments
start_arb_id = self.ARB_ID_REQUEST - 5
end_arb_id = self.ARB_ID_REQUEST + 5
blacklist = []
auto_blacklist_duration = 0
timeout = self.BRUTEFORCE_TIMEOUT
verify = True
print_results = False
# Perform UDS discovery
result = uds.uds_discovery(min_id=start_arb_id,
max_id=end_arb_id,
blacklist_args=blacklist,
auto_blacklist_duration=auto_blacklist_duration,
delay=timeout,
verify=verify,
print_results=print_results)
expected_result = [(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)]
self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_uds_discovery_blacklist(self):
# Discovery arguments
start_arb_id = self.ARB_ID_REQUEST - 5
end_arb_id = self.ARB_ID_REQUEST + 5
# Blacklist the arbitration ID used for response
blacklist = [self.ARB_ID_RESPONSE]
auto_blacklist_duration = 0
timeout = self.BRUTEFORCE_TIMEOUT
verify = True
print_results = False
# Perform UDS discovery
result = uds.uds_discovery(min_id=start_arb_id,
max_id=end_arb_id,
blacklist_args=blacklist,
auto_blacklist_duration=auto_blacklist_duration,
delay=timeout,
verify=verify,
print_results=print_results)
# No results expected due to blacklist
expected_result = []
self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_service_discovery(self):
# Service discovery arguments
range_start = 0x09
range_end = 0x13
print_results = False
# Perform service discovery
result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST,
arb_id_response=self.ARB_ID_RESPONSE,
timeout=self.BRUTEFORCE_TIMEOUT,
min_id=range_start,
max_id=range_end,
print_results=print_results)
# Supported services within specified range
expected_result = [ServiceID.DIAGNOSTIC_SESSION_CONTROL, ServiceID.ECU_RESET]
self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_service_discovery_empty_range(self):
# Service discovery arguments
range_start = 0x00
range_end = 0x05
print_results = False
# Perform service discovery
result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST,
arb_id_response=self.ARB_ID_RESPONSE,
timeout=self.BRUTEFORCE_TIMEOUT,
min_id=range_start,
max_id=range_end,
print_results=print_results)
# No services should be found within range
expected_result = []
self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected no hits".format(
result))
def test_ecu_reset_hard_reset_success(self):
# ECU Reset arguments | reset_type = Services.EcuReset.ResetType.HARD_RESET | 4 | 2023-11-13 05:05:46+00:00 | 12k |
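A minimal sketch (Python), assuming a `timeout` value and unittest assertions that are not shown in this entry, of how `test_ecu_reset_hard_reset_success` plausibly continues from the gold next_line `reset_type = Services.EcuReset.ResetType.HARD_RESET`. It relies only on the `uds.ecu_reset(arb_id_request, arb_id_response, reset_type, timeout)` signature from the context snippet and on the mock ECU's `handle_ecu_reset` answering a hard reset with a positive response that echoes the reset type; the exact byte layout asserted by the real caringcaribou test is an assumption.

# Hedged sketch, not the verbatim caringcaribou test body.
def test_ecu_reset_hard_reset_success(self):
    # ECU Reset arguments
    reset_type = Services.EcuReset.ResetType.HARD_RESET  # gold next_line of this entry
    timeout = None  # assumption: let uds.ecu_reset apply its default timeout handling
    # Perform ECU Reset against the mock ECU started in setUp()
    result = uds.ecu_reset(arb_id_request=self.ARB_ID_REQUEST,
                           arb_id_response=self.ARB_ID_RESPONSE,
                           reset_type=reset_type,
                           timeout=timeout)
    # The mock replies positively with a payload echoing the reset type;
    # the precise expected response format is an assumption here.
    self.assertIsNotNone(result, "ECU Reset gave no response")
    self.assertIn(reset_type, result, "Positive response should echo the reset type")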
L1bra1/WeakMotion | gen_data/gen_weak_data.py | [
{
"identifier": "NuScenes",
"path": "gen_data/nuscenes/nuscenes.py",
"snippet": "class NuScenes:\n \"\"\"\n Database class for nuScenes to help query and retrieve information from the database.\n \"\"\"\n\n def __init__(self,\n version: str = 'v1.0-mini',\n dataroot: str = '/data/sets/nuscenes',\n verbose: bool = True,\n map_resolution: float = 0.1):\n \"\"\"\n Loads database and creates reverse indexes and shortcuts.\n :param version: Version to load (e.g. \"v1.0\", ...).\n :param dataroot: Path to the tables and data.\n :param verbose: Whether to print status messages during load.\n :param map_resolution: Resolution of maps (meters).\n \"\"\"\n self.version = version\n self.dataroot = dataroot\n self.verbose = verbose\n self.table_names = ['category', 'attribute', 'visibility', 'instance', 'sensor', 'calibrated_sensor',\n 'ego_pose', 'log', 'scene', 'sample', 'sample_data', 'sample_annotation', 'map']\n\n assert osp.exists(self.table_root), 'Database version not found: {}'.format(self.table_root)\n\n start_time = time.time()\n if verbose:\n print(\"======\\nLoading NuScenes tables for version {}...\".format(self.version))\n\n # Explicitly assign tables to help the IDE determine valid class members.\n self.category = self.__load_table__('category')\n self.attribute = self.__load_table__('attribute')\n self.visibility = self.__load_table__('visibility')\n self.instance = self.__load_table__('instance')\n self.sensor = self.__load_table__('sensor')\n self.calibrated_sensor = self.__load_table__('calibrated_sensor')\n self.ego_pose = self.__load_table__('ego_pose')\n self.log = self.__load_table__('log')\n self.scene = self.__load_table__('scene')\n self.sample = self.__load_table__('sample')\n self.sample_data = self.__load_table__('sample_data')\n self.sample_annotation = self.__load_table__('sample_annotation')\n self.map = self.__load_table__('map')\n\n # Initialize the colormap which maps from class names to RGB values.\n self.colormap = get_colormap()\n\n lidar_tasks = [t for t in ['lidarseg', 'panoptic'] if osp.exists(osp.join(self.table_root, t + '.json'))]\n if len(lidar_tasks) > 0:\n self.lidarseg_idx2name_mapping = dict()\n self.lidarseg_name2idx_mapping = dict()\n self.load_lidarseg_cat_name_mapping()\n for i, lidar_task in enumerate(lidar_tasks):\n if self.verbose:\n print(f'Loading nuScenes-{lidar_task}...')\n if lidar_task == 'lidarseg':\n self.lidarseg = self.__load_table__(lidar_task)\n else:\n self.panoptic = self.__load_table__(lidar_task)\n\n setattr(self, lidar_task, self.__load_table__(lidar_task))\n label_files = os.listdir(os.path.join(self.dataroot, lidar_task, self.version))\n num_label_files = len([name for name in label_files if (name.endswith('.bin') or name.endswith('.npz'))])\n num_lidarseg_recs = len(getattr(self, lidar_task))\n assert num_lidarseg_recs == num_label_files, \\\n f'Error: there are {num_label_files} label files but {num_lidarseg_recs} {lidar_task} records.'\n self.table_names.append(lidar_task)\n # Sort the colormap to ensure that it is ordered according to the indices in self.category.\n self.colormap = dict({c['name']: self.colormap[c['name']]\n for c in sorted(self.category, key=lambda k: k['index'])})\n\n # If available, also load the image_annotations table created by export_2d_annotations_as_json().\n if osp.exists(osp.join(self.table_root, 'image_annotations.json')):\n self.image_annotations = self.__load_table__('image_annotations')\n\n # Initialize map mask for each map record.\n for map_record in self.map:\n map_record['mask'] = MapMask(osp.join(self.dataroot, map_record['filename']), 
resolution=map_resolution)\n\n if verbose:\n for table in self.table_names:\n print(\"{} {},\".format(len(getattr(self, table)), table))\n print(\"Done loading in {:.3f} seconds.\\n======\".format(time.time() - start_time))\n\n # Make reverse indexes for common lookups.\n self.__make_reverse_index__(verbose)\n\n # Initialize NuScenesExplorer class.\n self.explorer = NuScenesExplorer(self)\n\n @property\n def table_root(self) -> str:\n \"\"\" Returns the folder where the tables are stored for the relevant version. \"\"\"\n return osp.join(self.dataroot, self.version)\n\n def __load_table__(self, table_name) -> dict:\n \"\"\" Loads a table. \"\"\"\n with open(osp.join(self.table_root, '{}.json'.format(table_name))) as f:\n table = json.load(f)\n return table\n\n def load_lidarseg_cat_name_mapping(self):\n \"\"\" Create mapping from class index to class name, and vice versa, for easy lookup later on \"\"\"\n for lidarseg_category in self.category:\n # Check that the category records contain both the keys 'name' and 'index'.\n assert 'index' in lidarseg_category.keys(), \\\n 'Please use the category.json that comes with nuScenes-lidarseg, and not the old category.json.'\n\n self.lidarseg_idx2name_mapping[lidarseg_category['index']] = lidarseg_category['name']\n self.lidarseg_name2idx_mapping[lidarseg_category['name']] = lidarseg_category['index']\n\n def __make_reverse_index__(self, verbose: bool) -> None:\n \"\"\"\n De-normalizes database to create reverse indices for common cases.\n :param verbose: Whether to print outputs.\n \"\"\"\n\n start_time = time.time()\n if verbose:\n print(\"Reverse indexing ...\")\n\n # Store the mapping from token to table index for each table.\n self._token2ind = dict()\n for table in self.table_names:\n self._token2ind[table] = dict()\n\n for ind, member in enumerate(getattr(self, table)):\n self._token2ind[table][member['token']] = ind\n\n # Decorate (adds short-cut) sample_annotation table with for category name.\n for record in self.sample_annotation:\n inst = self.get('instance', record['instance_token'])\n record['category_name'] = self.get('category', inst['category_token'])['name']\n\n # Decorate (adds short-cut) sample_data with sensor information.\n for record in self.sample_data:\n cs_record = self.get('calibrated_sensor', record['calibrated_sensor_token'])\n sensor_record = self.get('sensor', cs_record['sensor_token'])\n record['sensor_modality'] = sensor_record['modality']\n record['channel'] = sensor_record['channel']\n\n # Reverse-index samples with sample_data and annotations.\n for record in self.sample:\n record['data'] = {}\n record['anns'] = []\n\n for record in self.sample_data:\n if record['is_key_frame']:\n sample_record = self.get('sample', record['sample_token'])\n sample_record['data'][record['channel']] = record['token']\n\n for ann_record in self.sample_annotation:\n sample_record = self.get('sample', ann_record['sample_token'])\n sample_record['anns'].append(ann_record['token'])\n\n # Add reverse indices from log records to map records.\n if 'log_tokens' not in self.map[0].keys():\n raise Exception('Error: log_tokens not in map table. 
This code is not compatible with the teaser dataset.')\n log_to_map = dict()\n for map_record in self.map:\n for log_token in map_record['log_tokens']:\n log_to_map[log_token] = map_record['token']\n for log_record in self.log:\n log_record['map_token'] = log_to_map[log_record['token']]\n\n if verbose:\n print(\"Done reverse indexing in {:.1f} seconds.\\n======\".format(time.time() - start_time))\n\n def get(self, table_name: str, token: str) -> dict:\n \"\"\"\n Returns a record from table in constant runtime.\n :param table_name: Table name.\n :param token: Token of the record.\n :return: Table record. See README.md for record details for each table.\n \"\"\"\n assert table_name in self.table_names, \"Table {} not found\".format(table_name)\n\n return getattr(self, table_name)[self.getind(table_name, token)]\n\n def getind(self, table_name: str, token: str) -> int:\n \"\"\"\n This returns the index of the record in a table in constant runtime.\n :param table_name: Table name.\n :param token: Token of the record.\n :return: The index of the record in table, table is an array.\n \"\"\"\n return self._token2ind[table_name][token]\n\n def field2token(self, table_name: str, field: str, query) -> List[str]:\n \"\"\"\n This function queries all records for a certain field value, and returns the tokens for the matching records.\n Warning: this runs in linear time.\n :param table_name: Table name.\n :param field: Field name. See README.md for details.\n :param query: Query to match against. Needs to type match the content of the query field.\n :return: List of tokens for the matching records.\n \"\"\"\n matches = []\n for member in getattr(self, table_name):\n if member[field] == query:\n matches.append(member['token'])\n return matches\n\n def get_sample_data_path(self, sample_data_token: str) -> str:\n \"\"\" Returns the path to a sample_data. 
\"\"\"\n\n sd_record = self.get('sample_data', sample_data_token)\n return osp.join(self.dataroot, sd_record['filename'])\n\n def get_sample_data(self, sample_data_token: str,\n box_vis_level: BoxVisibility = BoxVisibility.ANY,\n selected_anntokens: List[str] = None,\n use_flat_vehicle_coordinates: bool = False) -> \\\n Tuple[str, List[Box], np.array]:\n \"\"\"\n Returns the data path as well as all annotations related to that sample_data.\n Note that the boxes are transformed into the current sensor's coordinate frame.\n :param sample_data_token: Sample_data token.\n :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.\n :param selected_anntokens: If provided only return the selected annotation.\n :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is\n aligned to z-plane in the world.\n :return: (data_path, boxes, camera_intrinsic <np.array: 3, 3>)\n \"\"\"\n\n # Retrieve sensor & pose records\n sd_record = self.get('sample_data', sample_data_token)\n cs_record = self.get('calibrated_sensor', sd_record['calibrated_sensor_token'])\n sensor_record = self.get('sensor', cs_record['sensor_token'])\n pose_record = self.get('ego_pose', sd_record['ego_pose_token'])\n\n data_path = self.get_sample_data_path(sample_data_token)\n\n if sensor_record['modality'] == 'camera':\n cam_intrinsic = np.array(cs_record['camera_intrinsic'])\n imsize = (sd_record['width'], sd_record['height'])\n else:\n cam_intrinsic = None\n imsize = None\n\n # Retrieve all sample annotations and map to sensor coordinate system.\n if selected_anntokens is not None:\n boxes = list(map(self.get_box, selected_anntokens))\n else:\n boxes = self.get_boxes(sample_data_token)\n\n # Make list of Box objects including coord system transforms.\n box_list = []\n for box in boxes:\n if use_flat_vehicle_coordinates:\n # Move box to ego vehicle coord system parallel to world z plane.\n yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0]\n box.translate(-np.array(pose_record['translation']))\n box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse)\n else:\n # Move box to ego vehicle coord system.\n box.translate(-np.array(pose_record['translation']))\n box.rotate(Quaternion(pose_record['rotation']).inverse)\n\n # Move box to sensor coord system.\n box.translate(-np.array(cs_record['translation']))\n box.rotate(Quaternion(cs_record['rotation']).inverse)\n\n if sensor_record['modality'] == 'camera' and not \\\n box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level):\n continue\n\n box_list.append(box)\n\n return data_path, box_list, cam_intrinsic\n\n def get_box(self, sample_annotation_token: str) -> Box:\n \"\"\"\n Instantiates a Box class from a sample annotation record.\n :param sample_annotation_token: Unique sample_annotation identifier.\n \"\"\"\n record = self.get('sample_annotation', sample_annotation_token)\n return Box(record['translation'], record['size'], Quaternion(record['rotation']),\n name=record['category_name'], token=record['token'])\n\n def get_boxes(self, sample_data_token: str) -> List[Box]:\n \"\"\"\n Instantiates Boxes for all annotation for a particular sample_data record. If the sample_data is a\n keyframe, this returns the annotations for that sample. 
But if the sample_data is an intermediate\n sample_data, a linear interpolation is applied to estimate the location of the boxes at the time the\n sample_data was captured.\n :param sample_data_token: Unique sample_data identifier.\n \"\"\"\n\n # Retrieve sensor & pose records\n sd_record = self.get('sample_data', sample_data_token)\n curr_sample_record = self.get('sample', sd_record['sample_token'])\n\n if curr_sample_record['prev'] == \"\" or sd_record['is_key_frame']:\n # If no previous annotations available, or if sample_data is keyframe just return the current ones.\n boxes = list(map(self.get_box, curr_sample_record['anns']))\n\n else:\n prev_sample_record = self.get('sample', curr_sample_record['prev'])\n\n curr_ann_recs = [self.get('sample_annotation', token) for token in curr_sample_record['anns']]\n prev_ann_recs = [self.get('sample_annotation', token) for token in prev_sample_record['anns']]\n\n # Maps instance tokens to prev_ann records\n prev_inst_map = {entry['instance_token']: entry for entry in prev_ann_recs}\n\n t0 = prev_sample_record['timestamp']\n t1 = curr_sample_record['timestamp']\n t = sd_record['timestamp']\n\n # There are rare situations where the timestamps in the DB are off so ensure that t0 < t < t1.\n t = max(t0, min(t1, t))\n\n boxes = []\n for curr_ann_rec in curr_ann_recs:\n\n if curr_ann_rec['instance_token'] in prev_inst_map:\n # If the annotated instance existed in the previous frame, interpolate center & orientation.\n prev_ann_rec = prev_inst_map[curr_ann_rec['instance_token']]\n\n # Interpolate center.\n center = [np.interp(t, [t0, t1], [c0, c1]) for c0, c1 in zip(prev_ann_rec['translation'],\n curr_ann_rec['translation'])]\n\n # Interpolate orientation.\n rotation = Quaternion.slerp(q0=Quaternion(prev_ann_rec['rotation']),\n q1=Quaternion(curr_ann_rec['rotation']),\n amount=(t - t0) / (t1 - t0))\n\n box = Box(center, curr_ann_rec['size'], rotation, name=curr_ann_rec['category_name'],\n token=curr_ann_rec['token'])\n else:\n # If not, simply grab the current annotation.\n box = self.get_box(curr_ann_rec['token'])\n\n boxes.append(box)\n return boxes\n\n\n def box_velocity(self, sample_annotation_token: str, max_time_diff: float = 1.5) -> np.ndarray:\n \"\"\"\n Estimate the velocity for an annotation.\n If possible, we compute the centered difference between the previous and next frame.\n Otherwise we use the difference between the current and previous/next frame.\n If the velocity cannot be estimated, values are set to np.nan.\n :param sample_annotation_token: Unique sample_annotation identifier.\n :param max_time_diff: Max allowed time diff between consecutive samples that are used to estimate velocities.\n :return: <np.float: 3>. 
Velocity in x/y/z direction in m/s.\n \"\"\"\n\n current = self.get('sample_annotation', sample_annotation_token)\n has_prev = current['prev'] != ''\n has_next = current['next'] != ''\n\n # Cannot estimate velocity for a single annotation.\n if not has_prev and not has_next:\n return np.array([np.nan, np.nan, np.nan])\n\n if has_prev:\n first = self.get('sample_annotation', current['prev'])\n else:\n first = current\n\n if has_next:\n last = self.get('sample_annotation', current['next'])\n else:\n last = current\n\n pos_last = np.array(last['translation'])\n pos_first = np.array(first['translation'])\n pos_diff = pos_last - pos_first\n\n time_last = 1e-6 * self.get('sample', last['sample_token'])['timestamp']\n time_first = 1e-6 * self.get('sample', first['sample_token'])['timestamp']\n time_diff = time_last - time_first\n\n if has_next and has_prev:\n # If doing centered difference, allow for up to double the max_time_diff.\n max_time_diff *= 2\n\n if time_diff > max_time_diff:\n # If time_diff is too big, don't return an estimate.\n return np.array([np.nan, np.nan, np.nan])\n else:\n return pos_diff / time_diff\n\n def get_sample_lidarseg_stats(self,\n sample_token: str,\n sort_by: str = 'count',\n lidarseg_preds_bin_path: str = None,\n gt_from: str = 'lidarseg') -> None:\n \"\"\"\n Print the number of points for each class in the lidar pointcloud of a sample. Classes with have no\n points in the pointcloud will not be printed.\n :param sample_token: Sample token.\n :param sort_by: One of three options: count / name / index. If 'count`, the stats will be printed in\n ascending order of frequency; if `name`, the stats will be printed alphabetically\n according to class name; if `index`, the stats will be printed in ascending order of\n class index.\n :param lidarseg_preds_bin_path: A path to the .bin file which contains the user's lidar segmentation\n predictions for the sample.\n :param gt_from: 'lidarseg' or 'panoptic', ground truth source of point semantic labels.\n \"\"\"\n assert gt_from in ['lidarseg', 'panoptic'], f'gt_from can only be lidarseg or panoptic, get {gt_from}'\n assert hasattr(self, gt_from), f'Error: You have no {gt_from} data; unable to get ' \\\n 'statistics for segmentation of the point cloud.'\n assert sort_by in ['count', 'name', 'index'], 'Error: sort_by can only be one of the following: ' \\\n 'count / name / index.'\n semantic_table = getattr(self, gt_from)\n sample_rec = self.get('sample', sample_token)\n ref_sd_token = sample_rec['data']['LIDAR_TOP']\n ref_sd_record = self.get('sample_data', ref_sd_token)\n\n # Ensure that lidar pointcloud is from a keyframe.\n assert ref_sd_record['is_key_frame'], 'Error: Only pointclouds which are keyframes have ' \\\n 'lidar segmentation labels. Rendering aborted.'\n\n if lidarseg_preds_bin_path:\n lidarseg_labels_filename = lidarseg_preds_bin_path\n assert os.path.exists(lidarseg_labels_filename), \\\n 'Error: Unable to find {} to load the predictions for sample token {} ' \\\n '(lidar sample data token {}) from.'.format(lidarseg_labels_filename, sample_token, ref_sd_token)\n\n header = '===== Statistics for ' + sample_token + ' (predictions) ====='\n else:\n assert len(semantic_table) > 0, 'Error: There are no ground truth labels found for nuScenes-{} for {}.'\\\n 'Are you loading the test set? 
\\nIf you want to see the sample statistics'\\\n ' for your predictions, pass a path to the appropriate .bin/npz file using'\\\n ' the lidarseg_preds_bin_path argument.'.format(gt_from, self.version)\n lidar_sd_token = self.get('sample', sample_token)['data']['LIDAR_TOP']\n lidarseg_labels_filename = os.path.join(self.dataroot,\n self.get(gt_from, lidar_sd_token)['filename'])\n\n header = '===== Statistics for ' + sample_token + ' ====='\n print(header)\n\n points_label = load_bin_file(lidarseg_labels_filename, type=gt_from)\n if gt_from == 'panoptic':\n points_label = panoptic_to_lidarseg(points_label)\n lidarseg_counts = get_stats(points_label, len(self.lidarseg_idx2name_mapping))\n\n lidarseg_counts_dict = dict()\n for i in range(len(lidarseg_counts)):\n lidarseg_counts_dict[self.lidarseg_idx2name_mapping[i]] = lidarseg_counts[i]\n\n if sort_by == 'count':\n out = sorted(lidarseg_counts_dict.items(), key=lambda item: item[1])\n elif sort_by == 'name':\n out = sorted(lidarseg_counts_dict.items())\n else:\n out = lidarseg_counts_dict.items()\n\n for class_name, count in out:\n if count > 0:\n idx = self.lidarseg_name2idx_mapping[class_name]\n print('{:3} {:40} n={:12,}'.format(idx, class_name, count))\n\n print('=' * len(header))\n\n def list_categories(self) -> None:\n self.explorer.list_categories()\n\n def list_lidarseg_categories(self, sort_by: str = 'count', gt_from: str = 'lidarseg') -> None:\n self.explorer.list_lidarseg_categories(sort_by=sort_by, gt_from=gt_from)\n\n def list_panoptic_instances(self, sort_by: str = 'count', get_hist: bool = False) -> None:\n self.explorer.list_panoptic_instances(sort_by=sort_by, get_hist=get_hist)\n\n def list_attributes(self) -> None:\n self.explorer.list_attributes()\n\n def list_scenes(self) -> None:\n self.explorer.list_scenes()\n\n def list_sample(self, sample_token: str) -> None:\n self.explorer.list_sample(sample_token)\n\n def render_pointcloud_in_image(self, sample_token: str, dot_size: int = 5, pointsensor_channel: str = 'LIDAR_TOP',\n camera_channel: str = 'CAM_FRONT', out_path: str = None,\n render_intensity: bool = False,\n show_lidarseg: bool = False,\n filter_lidarseg_labels: List = None,\n show_lidarseg_legend: bool = False,\n verbose: bool = True,\n lidarseg_preds_bin_path: str = None,\n show_panoptic: bool = False) -> None:\n self.explorer.render_pointcloud_in_image(sample_token, dot_size, pointsensor_channel=pointsensor_channel,\n camera_channel=camera_channel, out_path=out_path,\n render_intensity=render_intensity,\n show_lidarseg=show_lidarseg,\n filter_lidarseg_labels=filter_lidarseg_labels,\n show_lidarseg_legend=show_lidarseg_legend,\n verbose=verbose,\n lidarseg_preds_bin_path=lidarseg_preds_bin_path,\n show_panoptic=show_panoptic)\n\n def render_sample(self, sample_token: str,\n box_vis_level: BoxVisibility = BoxVisibility.ANY,\n nsweeps: int = 1,\n out_path: str = None,\n show_lidarseg: bool = False,\n filter_lidarseg_labels: List = None,\n lidarseg_preds_bin_path: str = None,\n verbose: bool = True,\n show_panoptic: bool = False) -> None:\n self.explorer.render_sample(sample_token, box_vis_level, nsweeps=nsweeps, out_path=out_path,\n show_lidarseg=show_lidarseg, filter_lidarseg_labels=filter_lidarseg_labels,\n lidarseg_preds_bin_path=lidarseg_preds_bin_path, verbose=verbose,\n show_panoptic=show_panoptic)\n\n def render_sample_data(self, sample_data_token: str, with_anns: bool = True,\n box_vis_level: BoxVisibility = BoxVisibility.ANY, axes_limit: float = 40, ax: Axes = None,\n nsweeps: int = 1, out_path: str = None, 
underlay_map: bool = True,\n use_flat_vehicle_coordinates: bool = True,\n show_lidarseg: bool = False,\n show_lidarseg_legend: bool = False,\n filter_lidarseg_labels: List = None,\n lidarseg_preds_bin_path: str = None, verbose: bool = True,\n show_panoptic: bool = False) -> None:\n self.explorer.render_sample_data(sample_data_token, with_anns, box_vis_level, axes_limit, ax, nsweeps=nsweeps,\n out_path=out_path,\n underlay_map=underlay_map,\n use_flat_vehicle_coordinates=use_flat_vehicle_coordinates,\n show_lidarseg=show_lidarseg,\n show_lidarseg_legend=show_lidarseg_legend,\n filter_lidarseg_labels=filter_lidarseg_labels,\n lidarseg_preds_bin_path=lidarseg_preds_bin_path,\n verbose=verbose,\n show_panoptic=show_panoptic)\n\n def render_annotation(self, sample_annotation_token: str, margin: float = 10, view: np.ndarray = np.eye(4),\n box_vis_level: BoxVisibility = BoxVisibility.ANY, out_path: str = None,\n extra_info: bool = False) -> None:\n self.explorer.render_annotation(sample_annotation_token, margin, view, box_vis_level, out_path, extra_info)\n\n def render_instance(self, instance_token: str, margin: float = 10, view: np.ndarray = np.eye(4),\n box_vis_level: BoxVisibility = BoxVisibility.ANY, out_path: str = None,\n extra_info: bool = False) -> None:\n self.explorer.render_instance(instance_token, margin, view, box_vis_level, out_path, extra_info)\n\n def render_scene(self, scene_token: str, freq: float = 10, imsize: Tuple[float, float] = (640, 360),\n out_path: str = None) -> None:\n self.explorer.render_scene(scene_token, freq, imsize, out_path)\n\n def render_scene_channel(self, scene_token: str, channel: str = 'CAM_FRONT', freq: float = 10,\n imsize: Tuple[float, float] = (640, 360), out_path: str = None) -> None:\n self.explorer.render_scene_channel(scene_token, channel=channel, freq=freq, imsize=imsize, out_path=out_path)\n\n def render_egoposes_on_map(self, log_location: str, scene_tokens: List = None, out_path: str = None) -> None:\n self.explorer.render_egoposes_on_map(log_location, scene_tokens, out_path=out_path)\n\n def render_scene_channel_lidarseg(self, scene_token: str,\n channel: str,\n out_folder: str = None,\n filter_lidarseg_labels: Iterable[int] = None,\n with_anns: bool = False,\n render_mode: str = None,\n verbose: bool = True,\n imsize: Tuple[int, int] = (640, 360),\n freq: float = 2,\n dpi: int = 150,\n lidarseg_preds_folder: str = None,\n show_panoptic: bool = False) -> None:\n self.explorer.render_scene_channel_lidarseg(scene_token,\n channel,\n out_folder=out_folder,\n filter_lidarseg_labels=filter_lidarseg_labels,\n with_anns=with_anns,\n render_mode=render_mode,\n verbose=verbose,\n imsize=imsize,\n freq=freq,\n dpi=dpi,\n lidarseg_preds_folder=lidarseg_preds_folder,\n show_panoptic=show_panoptic)\n\n def render_scene_lidarseg(self, scene_token: str,\n out_path: str = None,\n filter_lidarseg_labels: Iterable[int] = None,\n with_anns: bool = False,\n imsize: Tuple[int, int] = (640, 360),\n freq: float = 2,\n verbose: bool = True,\n dpi: int = 200,\n lidarseg_preds_folder: str = None,\n show_panoptic: bool = False) -> None:\n self.explorer.render_scene_lidarseg(scene_token,\n out_path=out_path,\n filter_lidarseg_labels=filter_lidarseg_labels,\n with_anns=with_anns,\n imsize=imsize,\n freq=freq,\n verbose=verbose,\n dpi=dpi,\n lidarseg_preds_folder=lidarseg_preds_folder,\n show_panoptic=show_panoptic)"
},
{
"identifier": "LidarPointCloud",
"path": "gen_data/nuscenes/utils/data_classes.py",
"snippet": "class LidarPointCloud(PointCloud):\n\n @staticmethod\n def nbr_dims() -> int:\n \"\"\"\n Returns the number of dimensions.\n :return: Number of dimensions.\n \"\"\"\n return 4\n\n @classmethod\n def from_file(cls, file_name: str) -> 'LidarPointCloud':\n \"\"\"\n Loads LIDAR data from binary numpy format. Data is stored as (x, y, z, intensity, ring index).\n :param file_name: Path of the pointcloud file on disk.\n :return: LidarPointCloud instance (x, y, z, intensity).\n \"\"\"\n\n assert file_name.endswith('.bin'), 'Unsupported filetype {}'.format(file_name)\n\n scan = np.fromfile(file_name, dtype=np.float32)\n points = scan.reshape((-1, 5))[:, :cls.nbr_dims()]\n return cls(points.T)"
},
{
"identifier": "view_points",
"path": "gen_data/nuscenes/utils/geometry_utils.py",
"snippet": "def view_points(points: np.ndarray, view: np.ndarray, normalize: bool) -> np.ndarray:\n \"\"\"\n This is a helper class that maps 3d points to a 2d plane. It can be used to implement both perspective and\n orthographic projections. It first applies the dot product between the points and the view. By convention,\n the view should be such that the data is projected onto the first 2 axis. It then optionally applies a\n normalization along the third dimension.\n\n For a perspective projection the view should be a 3x3 camera matrix, and normalize=True\n For an orthographic projection with translation the view is a 3x4 matrix and normalize=False\n For an orthographic projection without translation the view is a 3x3 matrix (optionally 3x4 with last columns\n all zeros) and normalize=False\n\n :param points: <np.float32: 3, n> Matrix of points, where each point (x, y, z) is along each column.\n :param view: <np.float32: n, n>. Defines an arbitrary projection (n <= 4).\n The projection should be such that the corners are projected onto the first 2 axis.\n :param normalize: Whether to normalize the remaining coordinate (along the third axis).\n :return: <np.float32: 3, n>. Mapped point. If normalize=False, the third coordinate is the height.\n \"\"\"\n\n assert view.shape[0] <= 4\n assert view.shape[1] <= 4\n assert points.shape[0] == 3\n\n viewpad = np.eye(4)\n viewpad[:view.shape[0], :view.shape[1]] = view\n\n nbr_points = points.shape[1]\n\n # Do operation in homogenous coordinates.\n points = np.concatenate((points, np.ones((1, nbr_points))))\n points = np.dot(viewpad, points)\n points = points[:3, :]\n\n if normalize:\n points = points / points[2:3, :].repeat(3, 0).reshape(3, nbr_points)\n\n return points"
},
{
"identifier": "transform_matrix",
"path": "gen_data/nuscenes/utils/geometry_utils.py",
"snippet": "def transform_matrix(translation: np.ndarray = np.array([0, 0, 0]),\n rotation: Quaternion = Quaternion([1, 0, 0, 0]),\n inverse: bool = False) -> np.ndarray:\n \"\"\"\n Convert pose to transformation matrix.\n :param translation: <np.float32: 3>. Translation in x, y, z.\n :param rotation: Rotation in quaternions (w ri rj rk).\n :param inverse: Whether to compute inverse transform matrix.\n :return: <np.float32: 4, 4>. Transformation matrix.\n \"\"\"\n tm = np.eye(4)\n\n if inverse:\n rot_inv = rotation.rotation_matrix.T\n trans = np.transpose(-np.array(translation))\n tm[:3, :3] = rot_inv\n tm[:3, 3] = rot_inv.dot(trans)\n else:\n tm[:3, :3] = rotation.rotation_matrix\n tm[:3, 3] = np.transpose(np.array(translation))\n\n return tm"
}
] | from gen_data.nuscenes.nuscenes import NuScenes
from gen_data.nuscenes.utils.data_classes import LidarPointCloud
from functools import reduce
from gen_data.nuscenes.utils.geometry_utils import view_points, transform_matrix
from pyquaternion import Quaternion
import os
import numpy as np
import argparse | 10,304 |
_, sort_idx = np.unique(all_times, return_index=True)
unique_times = all_times[np.sort(sort_idx)] # Preserve the item order in unique_times
num_sweeps = len(unique_times)
# Make sure we have sufficient past and future sweeps
if num_sweeps != (nsweeps_back + nsweeps_forward):
# Skip some keyframes if necessary
flag = False
for _ in range(num_keyframe_skipped + 1):
if curr_sample['next'] != '':
curr_sample = nusc.get('sample', curr_sample['next'])
else:
flag = True
break
if flag: # No more keyframes
break
else:
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
# Reset
adj_seq_cnt = 0
save_weak_dict_list = list()
continue
if adj_seq_cnt == 0:
save_weak_dict = dict()
lidar_curr_sample = curr_sample
key_timestamps = np.zeros(3)
lidar_sd_token_data = nusc.get('sample_data', lidar_curr_sample['data']['LIDAR_TOP'])
_, ref_from_car, car_from_global, ref_time = get_pc_pose(lidar_sd_token_data, inverse=True)
lidar_curr_sample = nusc.get('sample', lidar_curr_sample['prev'])
# 0 past (-0.5s); 1 current (0s); 2 future (+0.5s)
for key_frame_index in range(3):
lidar_sd_token_data = nusc.get('sample_data', lidar_curr_sample['data']['LIDAR_TOP'])
lidar_sd_token = lidar_sd_token_data['token']
save_weak_dict['token_' + str(key_frame_index)] = lidar_sd_token
current_pc, car_from_current, global_from_car, timestamp = get_pc_pose(lidar_sd_token_data, inverse=False)
trans_matrix = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
current_pc[:3, :] = trans_matrix.dot(np.vstack((current_pc[:3, :], np.ones(current_pc.shape[1]))))[:3, :]
save_weak_dict['synchronized_pc_' + str(key_frame_index)] = current_pc[:3, :]
lidarseg_labels_filename = os.path.join(nusc.dataroot, nusc.get('lidarseg', lidar_sd_token)['filename'])
points_label = np.fromfile(lidarseg_labels_filename, dtype=np.uint8).reshape([-1, 1])
key_timestamps[key_frame_index] = 1e-6 * lidar_sd_token_data['timestamp']
save_weak_dict['points_label_' + str(key_frame_index)] = points_label
sample_idx, pc_random_index_dict = gen_random_index_for_pc(current_pc, lidar_sd_token, pc_random_index_dict)
save_weak_dict['sample_idx_' + str(key_frame_index)] = sample_idx
if key_frame_index != 2:
lidar_curr_sample = nusc.get('sample', lidar_curr_sample['next'])
save_weak_dict['key_timestamp'] = key_timestamps
save_weak_dict_list.append(save_weak_dict)
adj_seq_cnt += 1
if adj_seq_cnt == num_adj_seqs:
for seq_idx, seq_weak_dict in enumerate(save_weak_dict_list):
# save the data
save_directory = check_folder(os.path.join(args.savepath, str(scene_idx) + '_' + str(save_seq_cnt)))
save_file_name = os.path.join(save_directory, str(seq_idx) + '.npy')
np.save(save_file_name, arr=seq_weak_dict)
print(" >> {} - {} Finish sample: {}, sequence {}".format(seq_weak_dict['key_timestamp'][0], seq_weak_dict['key_timestamp'][1], save_seq_cnt, seq_idx))
save_seq_cnt += 1
adj_seq_cnt = 0
save_weak_dict_list = list()
# Skip some keyframes if necessary
flag = False
for _ in range(num_keyframe_skipped + 1):
if curr_sample['next'] != '':
curr_sample = nusc.get('sample', curr_sample['next'])
else:
flag = True
break
if flag: # No more keyframes
break
else:
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
else:
flag = False
for _ in range(skip_frame + 1):
if curr_sample_data['next'] != '':
curr_sample_data = nusc.get('sample_data', curr_sample_data['next'])
else:
flag = True
break
if flag: # No more sample frames
break
save_file_name = os.path.join(sample_info_directory, str(scene_idx) + '_sample_info.npy')
np.save(save_file_name, arr=pc_random_index_dict)
def get_pc_pose(ref_sd_rec, inverse = True):
# Get reference pose and timestamp
ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
ref_time = 1e-6 * ref_sd_rec['timestamp']
# Homogeneous transform from ego car frame to reference frame
| """
This code is to generate Foreground/Background information for the training set of nuScenes data.
And the code is modified based on 'gen_data.py' in MotionNet(https://www.merl.com/research/?research=license-request&sw=MotionNet)
"""
def check_folder(folder_name):
if not os.path.exists(folder_name):
os.mkdir(folder_name)
return folder_name
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--root', default='/path_to/nuScenes/nuScenes-data/', type=str, help='Root path to nuScenes dataset')
parser.add_argument('-s', '--split', default='train', type=str, help='The data split [train/val]')
parser.add_argument('-p', '--savepath', default='/path_to/nuScenes/weak-data/', type=str, help='Directory for saving the generated data')
args = parser.parse_args()
nusc = NuScenes(version='v1.0-trainval', dataroot=args.root, verbose=True)
print("Total number of scenes:", len(nusc.scene))
class_map = {'vehicle.car': 1, 'vehicle.bus.rigid': 1, 'vehicle.bus.bendy': 1, 'human.pedestrian': 2,
'vehicle.bicycle': 3} # background: 0, other: 4
if args.split == 'train':
num_keyframe_skipped = 0 # The number of keyframes we will skip when dumping the data
nsweeps_back = 30 # Number of frames back to the history (including the current timestamp)
nsweeps_forward = 20 # Number of frames into the future (does not include the current timestamp)
skip_frame = 0 # The number of frames skipped for the adjacent sequence
num_adj_seqs = 2 # number of adjacent sequences, among which the time gap is \delta t
else:
num_keyframe_skipped = 1
nsweeps_back = 25 # Setting this to 30 (for training) or 25 (for testing) allows conducting ablation studies on frame numbers
nsweeps_forward = 20
skip_frame = 0
num_adj_seqs = 1
# The specifications for BEV maps
voxel_size = (0.25, 0.25, 0.4)
area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]])
past_frame_skip = 3 # when generating the BEV maps, how many history frames need to be skipped
future_frame_skip = 0 # when generating the BEV maps, how many future frames need to be skipped
num_past_frames_for_bev_seq = 5 # the number of past frames for BEV map sequence
scenes = np.load('split.npy', allow_pickle=True).item().get(args.split)
print("Split: {}, which contains {} scenes.".format(args.split, len(scenes)))
args.savepath = check_folder(args.savepath)
sample_info_directory = check_folder(os.path.join(args.savepath, args.split + '_sample_info'))
args.savepath = check_folder(os.path.join(args.savepath, args.split))
def gen_data():
res_scenes = list()
for s in scenes:
s_id = s.split('_')[1]
res_scenes.append(int(s_id))
for scene_idx in res_scenes:
curr_scene = nusc.scene[scene_idx]
first_sample_token = curr_scene['first_sample_token']
curr_sample = nusc.get('sample', first_sample_token)
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
adj_seq_cnt = 0
save_seq_cnt = 0 # only used for save data file name
save_weak_dict_list = list()
pc_random_index_dict = dict()
# Iterate each sample data
print("Processing scene {} ...".format(scene_idx))
while curr_sample_data['next'] != '':
all_times = \
LidarPointCloud.from_file_multisweep_bf_sample_data_return_times(nusc, curr_sample_data,
nsweeps_back=nsweeps_back,
nsweeps_forward=nsweeps_forward)
_, sort_idx = np.unique(all_times, return_index=True)
unique_times = all_times[np.sort(sort_idx)] # Preserve the item order in unique_times
num_sweeps = len(unique_times)
# Make sure we have sufficient past and future sweeps
if num_sweeps != (nsweeps_back + nsweeps_forward):
# Skip some keyframes if necessary
flag = False
for _ in range(num_keyframe_skipped + 1):
if curr_sample['next'] != '':
curr_sample = nusc.get('sample', curr_sample['next'])
else:
flag = True
break
if flag: # No more keyframes
break
else:
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
# Reset
adj_seq_cnt = 0
save_weak_dict_list = list()
continue
if adj_seq_cnt == 0:
save_weak_dict = dict()
lidar_curr_sample = curr_sample
key_timestamps = np.zeros(3)
lidar_sd_token_data = nusc.get('sample_data', lidar_curr_sample['data']['LIDAR_TOP'])
_, ref_from_car, car_from_global, ref_time = get_pc_pose(lidar_sd_token_data, inverse=True)
lidar_curr_sample = nusc.get('sample', lidar_curr_sample['prev'])
# 0 past (-0.5s); 1 current (0s); 2 future (+0.5s)
for key_frame_index in range(3):
lidar_sd_token_data = nusc.get('sample_data', lidar_curr_sample['data']['LIDAR_TOP'])
lidar_sd_token = lidar_sd_token_data['token']
save_weak_dict['token_' + str(key_frame_index)] = lidar_sd_token
current_pc, car_from_current, global_from_car, timestamp = get_pc_pose(lidar_sd_token_data, inverse=False)
trans_matrix = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
current_pc[:3, :] = trans_matrix.dot(np.vstack((current_pc[:3, :], np.ones(current_pc.shape[1]))))[:3, :]
save_weak_dict['synchronized_pc_' + str(key_frame_index)] = current_pc[:3, :]
lidarseg_labels_filename = os.path.join(nusc.dataroot, nusc.get('lidarseg', lidar_sd_token)['filename'])
points_label = np.fromfile(lidarseg_labels_filename, dtype=np.uint8).reshape([-1, 1])
key_timestamps[key_frame_index] = 1e-6 * lidar_sd_token_data['timestamp']
save_weak_dict['points_label_' + str(key_frame_index)] = points_label
sample_idx, pc_random_index_dict = gen_random_index_for_pc(current_pc, lidar_sd_token, pc_random_index_dict)
save_weak_dict['sample_idx_' + str(key_frame_index)] = sample_idx
if key_frame_index != 2:
lidar_curr_sample = nusc.get('sample', lidar_curr_sample['next'])
save_weak_dict['key_timestamp'] = key_timestamps
save_weak_dict_list.append(save_weak_dict)
adj_seq_cnt += 1
if adj_seq_cnt == num_adj_seqs:
for seq_idx, seq_weak_dict in enumerate(save_weak_dict_list):
# save the data
save_directory = check_folder(os.path.join(args.savepath, str(scene_idx) + '_' + str(save_seq_cnt)))
save_file_name = os.path.join(save_directory, str(seq_idx) + '.npy')
np.save(save_file_name, arr=seq_weak_dict)
print(" >> {} - {} Finish sample: {}, sequence {}".format(seq_weak_dict['key_timestamp'][0], seq_weak_dict['key_timestamp'][1], save_seq_cnt, seq_idx))
save_seq_cnt += 1
adj_seq_cnt = 0
save_weak_dict_list = list()
# Skip some keyframes if necessary
flag = False
for _ in range(num_keyframe_skipped + 1):
if curr_sample['next'] != '':
curr_sample = nusc.get('sample', curr_sample['next'])
else:
flag = True
break
if flag: # No more keyframes
break
else:
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
else:
flag = False
for _ in range(skip_frame + 1):
if curr_sample_data['next'] != '':
curr_sample_data = nusc.get('sample_data', curr_sample_data['next'])
else:
flag = True
break
if flag: # No more sample frames
break
save_file_name = os.path.join(sample_info_directory, str(scene_idx) + '_sample_info.npy')
np.save(save_file_name, arr=pc_random_index_dict)
def get_pc_pose(ref_sd_rec, inverse = True):
# Get reference pose and timestamp
ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
ref_time = 1e-6 * ref_sd_rec['timestamp']
# Homogeneous transform from ego car frame to reference frame | ref_from_car = transform_matrix(ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']), | 3 | 2023-11-12 07:03:29+00:00 | 12k |
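A minimal sketch (Python), assuming the remainder of `get_pc_pose` mirrors the MotionNet-style helper this file is adapted from, of how the function plausibly continues after the gold next_line. It uses only `transform_matrix`, `Quaternion`, `LidarPointCloud.from_file`, and `nusc.get` from the imports and context above; the point-cloud loading line and the return tuple ordering are assumptions inferred from the call sites `_, ref_from_car, car_from_global, ref_time = get_pc_pose(..., inverse=True)` and `current_pc, car_from_current, global_from_car, timestamp = get_pc_pose(..., inverse=False)`.

# Hedged sketch, not the verbatim remainder of gen_weak_data.py.
def get_pc_pose(ref_sd_rec, inverse=True):
    # Get reference pose and timestamp
    ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
    ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
    ref_time = 1e-6 * ref_sd_rec['timestamp']
    # Homogeneous transform between ego car frame and the sensor (reference) frame;
    # the inverse flag flips the direction, matching the two call sites above.
    ref_from_car = transform_matrix(ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']),
                                    inverse=inverse)
    # Homogeneous transform between the global frame and the ego car frame.
    car_from_global = transform_matrix(ref_pose_rec['translation'], Quaternion(ref_pose_rec['rotation']),
                                       inverse=inverse)
    # Load the raw LiDAR sweep for this sample_data record (assumed to be what callers consume).
    current_pc = LidarPointCloud.from_file(os.path.join(nusc.dataroot, ref_sd_rec['filename']))
    return current_pc.points, ref_from_car, car_from_global, ref_time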
c3exchange/c3-smartcontracts-v1 | contracts_unified/core/methods/pool_move.py | [
{
"identifier": "health_check",
"path": "contracts_unified/core/internal/health_check.py",
"snippet": "@ABIReturnSubroutine\ndef health_check(\n account: AccountAddress,\n use_maint: abi.Bool,\n *,\n output: ExcessMargin,\n) -> Expr:\n \"\"\"Calculates the user's health\"\"\"\n\n count = abi.Uint64()\n\n user_data = UserInstrumentData()\n cash = Amount()\n principal = SignedAmount()\n index = abi.Uint64()\n\n instrument_id = InstrumentId()\n instrument = InstrumentListElement()\n loaned_balance = SignedAmount()\n balance_sum = SignedAmount()\n has_lend = abi.Uint64()\n\n haircut = Ratio()\n margin = Ratio()\n\n optimal_utilization = Ratio()\n\n price = Price()\n\n return Seq(\n # Clear output\n output.set(Int(0)),\n\n # Loop over instruments\n count.set(cast(abi.ReturnedValue, LocalStateHandler.get_user_instrument_count(account))),\n For(\n instrument_id.set(Int(0)),\n instrument_id.get() < count.get(),\n instrument_id.set(instrument_id.get() + Int(1)),\n ).Do(\n Seq(\n # Extract user position\n user_data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument_id))),\n cash.set(user_data.cash),\n principal.set(user_data.principal),\n index.set(user_data.index),\n\n If(cash.get() | principal.get()).Then(\n # Get price\n price.set(cast(abi.ReturnedValue, get_normalized_price(instrument_id))),\n\n # Get instrument\n instrument.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n\n # Get loan balance(netting)\n If(principal.get() != Int(0))\n .Then(\n has_lend.set(Not(signed_ltz(principal.get()))),\n If(has_lend.get())\n .Then(\n instrument.lend_index.use(lambda lend_index:\n loaned_balance.set(calculate_accrued_lend(principal, index, lend_index))\n )\n )\n .Else(\n instrument.borrow_index.use(lambda borrow_index:\n loaned_balance.set(calculate_accrued_borrow(principal, index, borrow_index)),\n )\n ),\n )\n .Else(\n has_lend.set(Int(0)),\n loaned_balance.set(Int(0))\n ),\n\n # Calculate balance sum\n balance_sum.set(signed_add(cash.get(), loaned_balance.get())),\n\n # Get risk factors\n # Load risk factors\n If(use_maint.get())\n .Then(\n instrument.maintenance_haircut.store_into(haircut),\n instrument.maintenance_margin.store_into(margin),\n )\n .Else(\n instrument.initial_haircut.store_into(haircut),\n instrument.initial_margin.store_into(margin),\n ),\n\n # Load optimal utilization\n instrument.optimal_utilization.store_into(optimal_utilization),\n\n # Calculate health for this asset and add to output\n # Add first term, health += price * sum * ratio\n If(signed_ltz(balance_sum.get()))\n .Then(\n output.set(signed_sub(output.get(), WideRatio([price.get(), signed_neg(balance_sum.get()), Int(RATIO_ONE) + margin.get()], [Int(PRICECASTER_RESCALE_FACTOR * RATIO_ONE)])))\n )\n .Else(\n output.set(signed_add(output.get(), WideRatio([price.get(), balance_sum.get(), Int(RATIO_ONE) - haircut.get()], [Int(PRICECASTER_RESCALE_FACTOR * RATIO_ONE)])))\n ),\n\n # Lend positions should be further multiplied by (1 - optimal_utilization)\n # We already included the 1 term, so we need to subtract the optimal utilization\n If(has_lend.get())\n .Then(\n output.set(\n signed_sub(\n output.get(),\n WideRatio(\n [price.get(), loaned_balance.get(), Int(RATIO_ONE) - haircut.get(), optimal_utilization.get()],\n # Normalize haircut and utilization\n [Int(PRICECASTER_RESCALE_FACTOR * RATIO_ONE * RATIO_ONE)],\n ),\n )\n )\n )\n ),\n )\n ),\n Log(Concat(account.get(), Itob(output.get())))\n )"
},
{
"identifier": "signed_add_to_cash",
"path": "contracts_unified/core/internal/move.py",
"snippet": "@ABIReturnSubroutine\ndef signed_add_to_cash(\n account: AccountAddress,\n instrument_id: InstrumentId,\n amount: Amount,\n) -> Expr:\n \"\"\"Adds amount to the user's asset balance\"\"\"\n\n data = UserInstrumentData()\n new_cash = SignedAmount()\n\n return Seq(\n # Load user data\n data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument_id))),\n\n # Update user data\n data.cash.use(lambda cash:\n new_cash.set(signed_add(amount.get(), cash.get())),\n ),\n\n # Validate the result is positive\n Assert(Not(signed_ltz(new_cash.get()))),\n\n # Update data\n data.principal.use(lambda principal:\n data.index.use(lambda index:\n data.set(\n new_cash,\n principal,\n index,\n )\n )\n ),\n\n cast(Expr, LocalStateHandler.set_position(account, instrument_id, data)),\n )"
},
{
"identifier": "perform_pool_move",
"path": "contracts_unified/core/internal/perform_pool_move.py",
"snippet": "@ABIReturnSubroutine\ndef perform_pool_move(\n account: AccountAddress,\n instrument_id: InstrumentId,\n transfer_amount: SignedAmount,\n) -> Expr:\n \"\"\"\n Transfers from the user to the pool `transfer_amount`.\n The function takes the following steps:\n 1. Calculates global accrued interest\n 2. Capitalizes the users balance by updating the\n user's principal with the user's accrued interest\n 3. Transfer between the user and the pool\n\n Parameters\n ----------\n user_position: current pool position of the user on `instrument_id`\n instrument_id: instrument index\n transfer_amount: amount to be transfered from the user to the pool.\n a positive value indicates that the user is sending to the pool (repaying and/or subscribing)\n a negative value indicates that the user is receiving from the pool (borrowing and/or redeeming)\n output: the user's position on the pool after the transfer\n \"\"\"\n\n # Instrument's attributes that change as part of this operation\n new_pool_last_update_time = RelativeTimestamp()\n old_pool_last_update_time = RelativeTimestamp()\n\n new_pool_borrowed = Amount()\n old_pool_borrowed = Amount()\n\n new_pool_liquidity = Amount()\n old_pool_liquidity = Amount()\n\n new_pool_borrow_index = InterestRate()\n old_pool_borrow_index = InterestRate()\n\n new_pool_lend_index = InterestRate()\n old_pool_lend_index = InterestRate()\n\n # Instrument attributes that are not affected by this operation\n asset_id = AssetId()\n initial_haircut = Ratio()\n initial_margin = Ratio()\n maintenance_haircut = Ratio()\n maintenance_margin = Ratio()\n\n optimal_utilization_ratio = Ratio()\n optimal_utilization_rate = InterestRate()\n min_rate = InterestRate()\n opt_rate = InterestRate()\n max_rate = InterestRate()\n\n # User's attributes\n user_position = UserInstrumentData()\n new_user_principal = SignedAmount()\n old_user_principal = SignedAmount()\n new_user_index = InterestRate()\n old_user_index = InterestRate()\n new_user_cash = Amount()\n old_user_cash = Amount()\n\n # Variables for intermediate calculations\n old_utilization_rate = InterestRate()\n old_interest_rate = InterestRate()\n delta_time = Timestamp()\n compounding_per_second_rate = InterestRate()\n compounding_per_period_rate = InterestRate()\n\n pool_accrued_interest = Amount()\n\n liquidity_transfer = SignedAmount()\n borrowed_transfer = SignedAmount()\n\n remainder = SignedAmount()\n\n instrument_state = InstrumentListElement()\n new_instrument_state = InstrumentListElement()\n\n return Seq(\n # Loads current instrument state\n instrument_state.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n\n instrument_state.asset_id.store_into(asset_id),\n # Loads pool data\n instrument_state.last_update_time.store_into(old_pool_last_update_time),\n instrument_state.borrowed.store_into(old_pool_borrowed),\n instrument_state.liquidity.store_into(old_pool_liquidity),\n instrument_state.borrow_index.store_into(old_pool_borrow_index),\n instrument_state.lend_index.store_into(old_pool_lend_index),\n\n # Loads interest curve data\n instrument_state.optimal_utilization.store_into(optimal_utilization_ratio),\n optimal_utilization_rate.set(WideRatio([optimal_utilization_ratio.get(), Int(RATE_ONE)], [Int(RATIO_ONE)])),\n instrument_state.min_rate.store_into(min_rate),\n instrument_state.opt_rate.store_into(opt_rate),\n instrument_state.max_rate.store_into(max_rate),\n\n # Loads haircuts and margins\n instrument_state.initial_haircut.store_into(initial_haircut),\n 
instrument_state.initial_margin.store_into(initial_margin),\n instrument_state.maintenance_haircut.store_into(maintenance_haircut),\n instrument_state.maintenance_margin.store_into(maintenance_margin),\n\n # Calculates the new timestamp\n # NOTE: Updates to this can be controlled via the algosdk function setBlockOffsetTimestamp\n new_pool_last_update_time.set(GlobalStateHandler.get_relative_timestamp()),\n\n ###############################################################################################################\n # 1.\n # Calculates the accrued interest in the pool since the last update\n # and reflects that on the total liquidity and borrow amount.\n\n # 1.1\n # AI_t = ((BI_t / BI_{t-1})-1) * B_{t-1} = ((1+R_{t_1})^dT - 1) * B_{t-1}\n\n # 1.1.1\n # Calculates the pool's utilization\n # U_{t-1} = B_{t-1} / L_{t-1} = B_{t-1} * 1 / L_{t-1}\n old_utilization_rate.set(\n If(old_pool_liquidity.get() == Int(0))\n .Then(Int(0))\n .Else(WideRatio([old_pool_borrowed.get(), Int(RATE_ONE)], [old_pool_liquidity.get()]))\n ),\n\n # 1.1.2\n # Calculates interest rate per second for the period since the last update\n # R_{t-1} = R_min + U_{t-1} / U_opt * R_slope1 if U_{t-1} < U_opt\n # R_{t-1} = R_opt + (U_{t-1}-U_opt) / (1 - U_opt) * R_slope2 if U_{t-1} >= U_opt\n old_interest_rate.set(\n If(old_utilization_rate.get() < optimal_utilization_rate.get())\n .Then(\n min_rate.get()\n + WideRatio(\n [old_utilization_rate.get(), opt_rate.get() - min_rate.get()],\n [optimal_utilization_rate.get()]\n )\n )\n .Else(\n opt_rate.get()\n + WideRatio(\n [old_utilization_rate.get() - optimal_utilization_rate.get(), max_rate.get() - opt_rate.get()],\n [Int(RATE_ONE) - optimal_utilization_rate.get()]\n )\n )\n ),\n\n # 1.1.3\n # Calculates time since previous update\n delta_time.set(new_pool_last_update_time.get() - old_pool_last_update_time.get()),\n\n # 1.1.4\n # AI_t = ((BI_t / BI_{t-1})-1) * B_{t-1} = ((1+R_{t_1})^dT - 1) * B_{t-1}\n compounding_per_second_rate.set(Int(RATE_ONE) + old_interest_rate.get()),\n compounding_per_period_rate.set(teal_expt(compounding_per_second_rate, delta_time)),\n pool_accrued_interest.set(\n WideRatio(\n [compounding_per_period_rate.get() - Int(RATE_ONE), old_pool_borrowed.get()],\n [Int(RATE_ONE)],\n )\n ),\n\n # 1.2\n # Capitalize pool accrued interest into liquidity and borrowed amounts\n new_pool_borrowed.set(old_pool_borrowed.get() + pool_accrued_interest.get()),\n new_pool_liquidity.set(old_pool_liquidity.get() + pool_accrued_interest.get()),\n\n # 1.3\n # Updates pool indexes\n new_pool_borrow_index.set(\n If(old_pool_borrowed.get() == Int(0))\n .Then(\n Int(RATE_ONE)\n )\n .Else(\n WideRatio([old_pool_borrow_index.get(), new_pool_borrowed.get()], [old_pool_borrowed.get()])\n )\n ),\n new_pool_lend_index.set(\n If(old_pool_liquidity.get() == Int(0))\n .Then(\n Int(RATE_ONE)\n )\n .Else(\n WideRatio([old_pool_lend_index.get(), new_pool_liquidity.get()], [old_pool_liquidity.get()])\n )\n ),\n\n # We only perform the pool move if a user was given, otherwise we just update the global instrument data\n If(account.get() != Global.zero_address()).Then(\n ###############################################################################################################\n # 2\n # Get user data\n user_position.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument_id))),\n user_position.cash.store_into(old_user_cash),\n user_position.principal.store_into(old_user_principal),\n user_position.index.store_into(old_user_index),\n\n # Capitalize user's accrued 
interest into user's principal\n new_user_principal.set(\n If(old_user_index.get() == Int(0))\n .Then(\n Int(0)\n )\n .ElseIf(signed_ltz(old_user_principal.get()))\n .Then(\n # The user has a borrow position\n calculate_accrued_borrow(old_user_principal, old_user_index, new_pool_borrow_index)\n )\n .Else(\n # The user has a lend position\n calculate_accrued_lend(old_user_principal, old_user_index, new_pool_lend_index)\n )\n ),\n\n ###############################################################################################################\n # 3\n # Transfer between the user and the pool\n\n # 3.0 Validate user's position against pool size\n Assert(new_pool_liquidity.get() >= signed_max(Int(0), new_user_principal.get())),\n # NOTE: The case for the user repaying more than the pool has borrowed is handled below\n # in order to handle zero-interest borrow case\n\n # 3.1 Updates the pool borrowed and liquitiy amounts\n\n # 3.1.1 Decompose the transfer_amount into borrowed_transfer and liquidity_transfer\n # such that:\n # a. transfer_amount == borrowed_transfer + liquidity_transfer\n # b. sign(transfer_amount) == sign(borrowed_transfer) == sign(liquidity_transfer)\n # c. if transfer_amount <=0:\n # # User cannot redeem more than its long position\n # liquidity_transfer = max(transfer_amount, min(0, -new_user_principal))\n # else:\n # # User must repay before subscribing\n # liquidity_transfer = max(transfer_amount + min(0, new_user_principal), 0)\n #\n # In other words:\n # - If transfer_amount is negative, then liquidity_transfer represents the\n # amount that the user is redeeming from the pool, and borrowed_transfer the amount that is\n # borrowing from the pool.\n # - If transfer_amount is positive, then liquidity_transfer represents the\n # amount that the user is subscribing to the pool, and borrowed_transfer the amount that is\n # repaying to the pool.\n liquidity_transfer.set(\n signed_max(\n signed_add(\n transfer_amount.get(),\n signed_min(Int(0), new_user_principal.get())\n ),\n signed_min(Int(0), signed_neg(new_user_principal.get()))\n )\n ),\n borrowed_transfer.set(\n signed_sub(\n transfer_amount.get(),\n liquidity_transfer.get()\n )\n ),\n\n # 3.1.2 Applies the liquidity_transfer and borrowed_transfer to the pool\n new_pool_borrowed.set(signed_sub(new_pool_borrowed.get(), borrowed_transfer.get())),\n\n # Handles the case where the user repays more than the pool has borrowed\n # This will happen when there are accumulated microunits of interest\n If(signed_ltz(new_pool_borrowed.get())).Then(\n # Remainder is whatever is left in the transfer after repaying all pool borrows\n remainder.set(signed_neg(new_pool_borrowed.get())),\n\n # New liquidity index is updated to reflect the remainder\n # liquidity_index' = liquidity_index + liquidity_index * remainder / pool_liquidity\n new_pool_lend_index.set(new_pool_lend_index.get() + WideRatio([new_pool_lend_index.get(), remainder.get()], [new_pool_liquidity.get()])),\n\n # New liquidity includes the remainder\n new_pool_liquidity.set(signed_add(new_pool_liquidity.get(), remainder.get())),\n\n # Borrowed is cleared to remain always positive\n new_pool_borrowed.set(Int(0))\n ),\n\n new_pool_liquidity.set(signed_add(new_pool_liquidity.get(), liquidity_transfer.get())),\n\n # 3.1.3 Validate the pool has sufficient liquidity to perform the operation\n Assert(new_pool_liquidity.get() >= new_pool_borrowed.get()),\n\n # 3.2 Update user's principal and cash\n new_user_principal.set(signed_add(new_user_principal.get(), transfer_amount.get())),\n 
new_user_cash.set(signed_sub(old_user_cash.get(), transfer_amount.get())),\n Assert(Not(signed_ltz(new_user_cash.get()))),\n\n # 3.3 Update user's index\n new_user_index.set(\n If(signed_ltz(new_user_principal.get()))\n .Then(new_pool_borrow_index.get())\n .Else(new_pool_lend_index.get())\n ),\n\n # Update user\n user_position.set(new_user_cash, new_user_principal, new_user_index),\n cast(Expr, LocalStateHandler.set_position(account, instrument_id, user_position)),\n ),\n\n # Update liquidity pool\n new_instrument_state.set(\n asset_id,\n initial_haircut,\n initial_margin,\n maintenance_haircut,\n maintenance_margin,\n new_pool_last_update_time,\n new_pool_borrow_index,\n new_pool_lend_index,\n optimal_utilization_ratio,\n min_rate,\n opt_rate,\n max_rate,\n new_pool_borrowed,\n new_pool_liquidity,\n ),\n\n # Update instrument\n cast(Expr, GlobalStateHandler.set_instrument(instrument_id, new_instrument_state)),\n )"
},
{
"identifier": "setup",
"path": "contracts_unified/core/internal/setup.py",
"snippet": "@Subroutine(TealType.none)\ndef setup(opup_amount: Expr) -> Expr:\n \"\"\"Setup the required pre-method OpUp and state handlers\"\"\"\n\n target = AppId()\n i = abi.Uint64()\n\n return Seq(\n # Get target\n # FIXME: Use the price caster when we can\n target.set(Txn.applications[1]),\n\n # Loop over the opup request\n # NOTE: We can't use the PyTEAL op-up because of ABI issues\n i.set(opup_amount),\n While(i.get() >= Global.min_txn_fee()).Do(\n InnerTxnBuilder.ExecuteMethodCall(\n app_id=target.get(),\n method_signature=\"nop()void\",\n args=[],\n extra_fields={TxnField.fee: Int(0)}\n ),\n i.set(i.get() - Global.min_txn_fee()),\n ),\n )"
},
{
"identifier": "sender_is_sig_validator",
"path": "contracts_unified/core/internal/validate_sender.py",
"snippet": "@ABIReturnSubroutine\ndef sender_is_sig_validator() -> Expr:\n \"\"\"Validates the sender is the signature validator \"\"\"\n\n return Assert(GlobalStateHandler.get_signature_validator() == Txn.sender())"
},
{
"identifier": "LocalStateHandler",
"path": "contracts_unified/core/state_handler/local_handler.py",
"snippet": "class LocalStateHandler:\n \"\"\"Handles per-user state for the Core contract\"\"\"\n\n position_size = abi.make(UserInstrumentData).type_spec().byte_length_static()\n\n # NOTE: Not a subroutine for performance reasons\n @staticmethod\n def initialize_or_resize_if_required(account: AccountAddress, offset: abi.Uint64) -> Expr:\n \"\"\"Sets up the box for the given account if it does not exist or needs to be resized\"\"\"\n return Seq(\n (box_contents := App.box_get(account.get())),\n # If the box is not big enough, we enlarge it\n If(Len(box_contents.value()) <= offset.get()).Then(\n # Both delete and create work whether the box exists already or not\n Pop(App.box_delete(account.get())),\n Pop(App.box_create(account.get(), offset.get() + Int(LocalStateHandler.position_size))),\n App.box_replace(account.get(), Int(0), box_contents.value()),\n # Ensure we have enough funds for mbr\n cast(Expr, GlobalStateHandler.ensure_mbr_fund()),\n )\n )\n\n @staticmethod\n @ABIReturnSubroutine\n def get_position(\n account: AccountAddress,\n instrument_id: InstrumentId,\n *,\n output: UserInstrumentData\n ) -> Expr:\n \"\"\"Returns the cash and pool data for the given instrument ID\"\"\"\n\n offset = abi.Uint64()\n\n return Seq(\n offset.set(instrument_id.get() * Int(LocalStateHandler.position_size)),\n # NOTE: Initialize the box if it doesn't exist.\n # This should only happen for the fee target if it didn't deposit/initialize itself already\n # To prevent that condition from causing failures, we initialize here\n # We will also resize the box if it's not big enough to hold the required instrument offset\n cast(Expr, LocalStateHandler.initialize_or_resize_if_required(account, offset)),\n output.decode(App.box_extract(account.get(), offset.get(), Int(LocalStateHandler.position_size)))\n )\n\n # NOTE: Not a subroutine for performance reasons\n @staticmethod\n def set_position(account: AccountAddress, instrument_id: InstrumentId, data: UserInstrumentData) -> Expr:\n \"\"\"Sets the cash and pool data for the given instrument ID\"\"\"\n return App.box_replace(account.get(), instrument_id.get() * Int(LocalStateHandler.position_size), data.encode())\n\n @staticmethod\n @ABIReturnSubroutine\n def get_user_instrument_count(account: AccountAddress, *, output: abi.Uint64) -> Expr:\n \"\"\"Gets the amount of instruments allocated for an user\"\"\"\n return Seq(\n (box_length := App.box_length(account.get())),\n output.set(box_length.value() / Int(LocalStateHandler.position_size)),\n If(output.get() > GlobalStateHandler.get_instrument_count()).Then(\n output.set(GlobalStateHandler.get_instrument_count())\n )\n )"
},
{
"identifier": "AccountAddress",
"path": "contracts_unified/library/c3types.py",
"snippet": "class SignedInstrumentAmount(abi.NamedTuple):\nclass LiquidationFactors(abi.NamedTuple):\nclass InstrumentListElement(abi.NamedTuple):\nclass UserInstrumentData(abi.NamedTuple):\nclass OnChainOrderData(abi.NamedTuple):\nclass WormholeAddress(abi.NamedTuple):\nclass DecodedWormholePayload(abi.NamedTuple):"
},
{
"identifier": "DelegationChain",
"path": "contracts_unified/library/c3types_user.py",
"snippet": "class OperationId:\nclass SigningMethod:\nclass SignedHeader(abi.NamedTuple):\nclass OperationMetaData(abi.NamedTuple):\nclass WithdrawData(abi.NamedTuple):\nclass PoolMoveData(abi.NamedTuple):\nclass DelegationData(abi.NamedTuple):\nclass LiquidationData(abi.NamedTuple):\nclass AccountMoveData(abi.NamedTuple):\nclass OrderData(abi.NamedTuple):"
},
{
"identifier": "PRICECASTER_RESCALE_FACTOR",
"path": "contracts_unified/library/constants.py",
"snippet": "PRICECASTER_RESCALE_FACTOR = 10**9"
},
{
"identifier": "get_normalized_price",
"path": "contracts_unified/library/pricecaster.py",
"snippet": "@ABIReturnSubroutine\ndef get_normalized_price(instrument_id: InstrumentId, *, output: abi.Uint64) -> Expr:\n \"\"\"Read data from the pricecaster\"\"\"\n\n entry_size = abi.make(PricecasterEntry).type_spec().byte_length_static()\n slot_size = 128 - 1\n\n entry = PricecasterEntry()\n pricecaster = AppId()\n ptr = abi.Uint64()\n start = abi.Uint64()\n end = abi.Uint64()\n data = ScratchVar(TealType.bytes)\n\n return Seq(\n # Get the pricecaster id\n pricecaster.set(GlobalStateHandler.get_pricecaster_id()),\n\n # Calculate base pointer of data in blob\n ptr.set(instrument_id.get() * Int(entry_size)),\n\n # Get start page\n start.set(ptr.get() / Int(slot_size)),\n\n # Get end page\n end.set((ptr.get() + Int(entry_size)) / Int(slot_size)),\n\n # Load first page of data\n page := App.globalGetEx(pricecaster.get(), _get_key(start.get())),\n Assert(page.hasValue()),\n data.store(page.value()),\n\n # Check for more data\n If(start.get() < end.get())\n .Then(\n page2 := App.globalGetEx(pricecaster.get(), _get_key(end.get())),\n Assert(page2.hasValue()),\n data.store(Concat(data.load(), page2.value())),\n ),\n\n # Extract entry\n entry.decode(Extract(data.load(), ptr.get() % Int(slot_size), Int(entry_size))),\n entry.normalized_price.store_into(output)\n )"
},
{
"identifier": "signed_add",
"path": "contracts_unified/library/signed_math.py",
"snippet": "@Subroutine(TealType.uint64)\ndef signed_add(lhs: Expr, rhs: Expr) -> Expr:\n \"\"\"Signed addition\"\"\"\n add_result = MultiValue(\n Op.addw,\n [TealType.uint64, TealType.uint64],\n args=[lhs, rhs],\n # TODO: add compile check to check version\n )\n\n return Seq(\n # Find sum\n add_result,\n (signed := abi.Uint64()).set(signed_ltz(lhs)),\n # Detect overflow when both inputs have the same sign and the result has a different sign\n Assert(\n Or(\n signed.get() != signed_ltz(rhs),\n signed.get() == signed_ltz(add_result.output_slots[1].load()),\n )\n ),\n add_result.output_slots[1].load(),\n )"
},
{
"identifier": "signed_gte",
"path": "contracts_unified/library/signed_math.py",
"snippet": "@Subroutine(TealType.uint64)\ndef signed_gte(lhs: Expr, rhs: Expr) -> Expr:\n \"\"\"Signed greater than or equal to\"\"\"\n return Seq(\n If(signed_ltz(lhs))\n .Then(If(signed_ltz(rhs), lhs >= rhs, Int(0)))\n .Else(If(signed_ltz(rhs), Int(1), lhs >= rhs))\n )"
},
{
"identifier": "signed_ltz",
"path": "contracts_unified/library/signed_math.py",
"snippet": "def signed_ltz(value: Expr) -> Expr:\n \"\"\"Signed less than zero\"\"\"\n return value & Int(0x8000000000000000)"
},
{
"identifier": "signed_neg",
"path": "contracts_unified/library/signed_math.py",
"snippet": "@Subroutine(TealType.uint64)\ndef signed_neg(value: Expr) -> Expr:\n \"\"\"Signed negation\"\"\"\n # Special case for zero because of wrap around\n return If(Not(value), value, ~value + Int(1))"
}
] | from typing import cast
from pyteal import (
ABIReturnSubroutine,
Assert,
Expr,
If,
Int,
Not,
Or,
Seq,
WideRatio,
abi,
)
from contracts_unified.core.internal.health_check import health_check
from contracts_unified.core.internal.move import signed_add_to_cash
from contracts_unified.core.internal.perform_pool_move import perform_pool_move
from contracts_unified.core.internal.setup import setup
from contracts_unified.core.internal.validate_sender import sender_is_sig_validator
from contracts_unified.core.state_handler.local_handler import LocalStateHandler
from contracts_unified.library.c3types import (
AccountAddress,
Amount,
ExcessMargin,
InstrumentId,
Price,
SignedAmount,
UserInstrumentData,
)
from contracts_unified.library.c3types_user import (
DelegationChain,
OperationId,
OperationMetaData,
PoolMoveData,
)
from contracts_unified.library.constants import PRICECASTER_RESCALE_FACTOR
from contracts_unified.library.pricecaster import get_normalized_price
from contracts_unified.library.signed_math import (
signed_add,
signed_gte,
signed_ltz,
signed_neg,
) | 7,297 | """
Implements Core contract method for transferring user's instruments to/from a pool.
"""
@ABIReturnSubroutine
def pool_move(
account: AccountAddress,
user_op: OperationMetaData,
_delegation_chain: DelegationChain,
_server_data: abi.DynamicBytes,
opup_budget: Amount,
) -> Expr:
"""Transfers instruments from user's address to the pool
Arguments:
account (AccountAddress): User's account address.
user_op (OperationMetaData): Operation metadata containing a basket of instruments.
_delegation_chain (DelegationChain): Delegation chain. Unused.
_server_data (abi.DynamicBytes): Server data. Unused.
opup_budget (Amount): Additional computation budget to allocate to this transaction.
"""
abi_false = abi.Bool()
user_old_health = ExcessMargin()
user_health = ExcessMargin()
data = PoolMoveData()
instrument = InstrumentId()
amount = SignedAmount()
user_data = UserInstrumentData()
price = Price()
cash = Amount()
neg_cash = SignedAmount()
return Seq(
setup(opup_budget.get()),
# Load constants
abi_false.set(Int(0)),
# Validate sender is a user proxy
cast(Expr, sender_is_sig_validator()),
# Get basket from user_op.data
user_op.operation.use(lambda op_data:
Seq(
data.decode(op_data.get()),
data.operation.use(lambda op: Assert(op.get() == OperationId.PoolMove)),
instrument.set(data.instrument),
amount.set(data.amount),
)
),
# Get old health
user_old_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),
# Move funds
cast(Expr, perform_pool_move(account, instrument, amount)),
# When there is a negative movement, we need to check that the user can support itself without netting
If(signed_ltz(amount.get())).Then(
# Get instrument price
price.set(cast(abi.ReturnedValue, get_normalized_price(instrument))),
# Extract user cash
user_data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument))),
cash.set(user_data.cash),
| """
Implements Core contract method for transferring user's instruments to/from a pool.
"""
@ABIReturnSubroutine
def pool_move(
account: AccountAddress,
user_op: OperationMetaData,
_delegation_chain: DelegationChain,
_server_data: abi.DynamicBytes,
opup_budget: Amount,
) -> Expr:
"""Transfers instruments from user's address to the pool
Arguments:
account (AccountAddress): User's account address.
user_op (OperationMetaData): Operation metadata containing a basket of instruments.
_delegation_chain (DelegationChain): Delegation chain. Unused.
_server_data (abi.DynamicBytes): Server data. Unused.
opup_budget (Amount): Additional computation budget to allocate to this transaction.
"""
abi_false = abi.Bool()
user_old_health = ExcessMargin()
user_health = ExcessMargin()
data = PoolMoveData()
instrument = InstrumentId()
amount = SignedAmount()
user_data = UserInstrumentData()
price = Price()
cash = Amount()
neg_cash = SignedAmount()
return Seq(
setup(opup_budget.get()),
# Load constants
abi_false.set(Int(0)),
# Validate sender is a user proxy
cast(Expr, sender_is_sig_validator()),
# Get basket from user_op.data
user_op.operation.use(lambda op_data:
Seq(
data.decode(op_data.get()),
data.operation.use(lambda op: Assert(op.get() == OperationId.PoolMove)),
instrument.set(data.instrument),
amount.set(data.amount),
)
),
# Get old health
user_old_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),
# Move funds
cast(Expr, perform_pool_move(account, instrument, amount)),
# When there is a negative movement, we need to check that the user can support itself without netting
If(signed_ltz(amount.get())).Then(
# Get instrument price
price.set(cast(abi.ReturnedValue, get_normalized_price(instrument))),
# Extract user cash
user_data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument))),
cash.set(user_data.cash), | neg_cash.set(signed_neg(cash.get())), | 13 | 2023-11-17 20:54:15+00:00 | 12k |
gunderson-dettmer/CE2OCF | CE2OCF/ocf/generators/vesting_enums_to_ocf.py | [
{
"identifier": "load_cic_event_definition",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_cic_event_definition(source_json: Path = DEFAULT_CIC_DEFS_PATH) -> CicEventDefinition:\n with source_json.open(\"r\") as config_file:\n return json.loads(config_file.read())"
},
{
"identifier": "load_double_trigger_definitions",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_double_trigger_definitions(\n source_json: Path = DEFAULT_DOUBLE_TRIG_DEFS_PATH,\n) -> dict[str, Optional[TerminationDetails]]:\n with source_json.open(\"r\") as config_file:\n return json.loads(config_file.read())"
},
{
"identifier": "load_single_trigger_definitions",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_single_trigger_definitions(\n source_json: Path = DEFAULT_SINGLE_TRIG_DEFS_PATH,\n) -> dict[str, Optional[CicEventDefinition]]:\n with source_json.open(\"r\") as config_file:\n return json.loads(config_file.read())"
},
{
"identifier": "generate_accel_trigger_termination_event_id",
"path": "CE2OCF/ocf/generators/ocf_id_generators.py",
"snippet": "def generate_accel_trigger_termination_event_id(\n vesting_schedule_id: str | int,\n trigger_type: typing.Literal[\"Single\", \"Double\"] = \"Double\",\n) -> str:\n return f\"{vesting_schedule_id} | {trigger_type} Trigger Termination Event\""
},
{
"identifier": "generate_cic_event_id",
"path": "CE2OCF/ocf/generators/ocf_id_generators.py",
"snippet": "def generate_cic_event_id(\n vesting_schedule_id: str | int,\n trigger_type: typing.Literal[\"Single\", \"Double\"] = \"Double\",\n) -> str:\n return f\"{vesting_schedule_id} | {trigger_type} Trigger CiC Event\""
},
{
"identifier": "generate_time_based_accel_expiration_event_id",
"path": "CE2OCF/ocf/generators/ocf_id_generators.py",
"snippet": "def generate_time_based_accel_expiration_event_id(vesting_schedule_id: str | int, modifier: str = \"\") -> str:\n return f\"{vesting_schedule_id} | Post-CiC Accel Exp{' ' if modifier != '' else ''}{modifier}\""
},
{
"identifier": "generate_vesting_start_id",
"path": "CE2OCF/ocf/generators/ocf_id_generators.py",
"snippet": "def generate_vesting_start_id(schedule_id: str) -> str:\n return f\"{schedule_id} | Start\""
},
{
"identifier": "generate_cliff_vesting_condition_id",
"path": "CE2OCF/ocf/generators/ocf_vesting_conditions.py",
"snippet": "def generate_cliff_vesting_condition_id(schedule_id: str) -> str:\n return f\"{schedule_id} | Cliff Vest\""
},
{
"identifier": "generate_event_based_vesting_condition",
"path": "CE2OCF/ocf/generators/ocf_vesting_conditions.py",
"snippet": "def generate_event_based_vesting_condition(\n condition_id: str | None = None,\n next_condition_ids: list[str] | None = None,\n description: str = \"\",\n remainder: bool = False,\n portion_numerator: int | None = None,\n portion_denominator: int | None = None,\n quantity: int | None = None,\n) -> dict:\n \"\"\"\n\n Builds OCF for event-based vesting condition. Can we set to either a fixed quantity or\n portion of quantity - either remaining unvested (if remainder is True) or portion of original grant\n (if remainder is False).\n\n :param quantity: If you want to use an absolute quantity, set this BUT NOT portion_denominator and portion_numerator\n :param portion_denominator: If you want this to be determined as some fraction or percent of vesting shares set\n this and portion_numerator - NOT quantity\n :param portion_numerator: If you want this to be determined as some fraction or percent of vesting shares set\n this and portion_denominator - NOT quantity\n :param remainder: If false, the ratio is applied to the entire quantity of the security's issuance. If true,\n it is applied to the amount that has yet to vest.\n :param condition_id: ID for this condition or, if none provider, random uuid v4 generated\n :param next_condition_ids: What are subsequent, dependent vesting conditions? Default is None\n :param description: Plain text description. Inclusion of specific legal language is suggested\n :return: OCF vesting condition dictionary matching specified parameters\n \"\"\"\n\n logger.debug(\"Function: generate_event_based_vesting_condition\")\n logger.debug(\"Arguments:\")\n logger.debug(f\" condition_id: {condition_id}\")\n logger.debug(f\" next_condition_ids: {next_condition_ids}\")\n logger.debug(f\" description: {description}\")\n logger.debug(f\" remainder: {remainder}\")\n logger.debug(f\" portion_numerator: {portion_numerator}\")\n logger.debug(f\" portion_denominator: {portion_denominator}\")\n logger.debug(f\" quantity: {quantity}\")\n\n if next_condition_ids is None:\n next_condition_ids = []\n\n if not condition_id:\n condition_id = uuid.uuid4().__str__()\n\n if (portion_numerator is not None or portion_denominator is not None) and not (\n isinstance(portion_denominator, int) and isinstance(portion_numerator, int)\n ):\n raise ValueError(\n \"If you are going to use a portion, you need to provide portion_numerator and portion_denominator\"\n )\n\n if quantity is not None and (portion_numerator or portion_denominator):\n raise ValueError(\n \"If you use quantity (fixed number of security units) do not provide portion values or vice-versa\"\n )\n\n if quantity is not None and (portion_numerator or portion_denominator):\n raise ValueError(\n \"If you use quantity (fixed number of security units) do not provide portion values or vice-versa\"\n )\n\n condition_ocf: dict[str, Any] = {\n \"id\": condition_id,\n \"description\": description,\n \"next_condition_ids\": next_condition_ids,\n \"trigger\": {\"type\": \"VESTING_EVENT\"},\n }\n\n if quantity is not None:\n condition_ocf[\"quantity\"] = f\"{quantity}\"\n\n if isinstance(portion_numerator, int) and isinstance(portion_denominator, int):\n\n condition_ocf[\"portion\"] = {\n \"numerator\": f\"{portion_numerator}\",\n \"denominator\": f\"{portion_denominator}\",\n }\n\n if remainder is not None:\n condition_ocf[\"portion\"][\"remainder\"] = remainder\n\n return condition_ocf"
},
{
"identifier": "generate_monthly_vesting_condition_id",
"path": "CE2OCF/ocf/generators/ocf_vesting_conditions.py",
"snippet": "def generate_monthly_vesting_condition_id(schedule_id, modifier: str = \"\") -> str:\n return f\"{schedule_id} | Monthly Vesting{' ' if modifier != '' else ''}{modifier}\""
},
{
"identifier": "generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration",
"path": "CE2OCF/ocf/generators/ocf_vesting_conditions.py",
"snippet": "def generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(\n vesting_start_condition_id: str,\n end_month: int = 48,\n cliff_month: int | None = 12,\n months_of_vest_credit_on_trigger: int = 6,\n ocf_event_generator: OcfEventGeneratorFunctionSig | None = None,\n) -> tuple[str, list[dict]]:\n \"\"\"\n\n This can be used to generate schedules with or without cliffs. For simplicity's sake, it must be\n expressed in months. \"Time-Served\" acceleration is basically where you say that someone's vesting should be\n calculated as if they served additional months.\n\n :param vesting_start_condition_id: To tie into any other parts of the vesting schedule, what is the vesting start\n condition this should be dependent upon. Could technically be any vesting\n condition id you want this to be based off of.\n :param end_month: How many months is the vesting schedule total.\n :param cliff_month: Must be before the end month - e.g. a cliff on month 498 or 49 or a 48-month schedule is stupid.\n :param months_of_vest_credit_on_trigger: If we're giving extra months to time served calculation, how many? This\n cannot be more credit months than you can use. E.g. if there's a cliff\n at 12 months and a 48-month vesting schedule, you can offer a max of 35\n months credit.\n :param ocf_event_generator: Callable to generate OCF events. You may want to use your own function if our default\n signature doesn't match what you need\n :return: a tuple, with element 0 being the id of the first vesting condition and element 1 being list of ocf\n conditions.\n\n\n \"\"\"\n\n vesting_period_type = \"MONTHS\"\n start_condition_id = \"\"\n conditions = []\n\n if isinstance(cliff_month, int):\n if cliff_month >= end_month:\n raise ValueError(\"Sorry, cliff month must be before the end month (or 0 for no cliff)\")\n\n if months_of_vest_credit_on_trigger > end_month - cliff_month:\n raise ValueError(\n \"Sorry, it doesn't make sense to acceleration give you more months credit than it's possible \"\n \"to actually use (e.g. a 13 month vesting credit on a 12 month vesting schedule is not allowed). 
\"\n )\n\n if cliff_month is None:\n shares_start_vesting_in_month_n = 0\n else:\n shares_start_vesting_in_month_n = cliff_month - months_of_vest_credit_on_trigger\n logger.debug(f\"Shares start vesting in month {shares_start_vesting_in_month_n}\")\n\n if shares_start_vesting_in_month_n < 0:\n shares_start_vesting_in_month_n = 0\n logger.debug(f\"Shares start vesting adjusted to {shares_start_vesting_in_month_n}\")\n\n months_fully_vested = end_month - months_of_vest_credit_on_trigger\n logger.debug(f\"Months fully vested: {months_fully_vested}\")\n\n if cliff_month is not None and shares_start_vesting_in_month_n > 0:\n start_condition_id = \"PRE-CLIFF-VEST-PERIOD\"\n conditions.extend(\n [\n {\n \"id\": \"PRE-CLIFF-VEST-PERIOD\",\n \"description\": \"Period during which no shares will vest, even with acceleration\",\n \"portion\": {\"numerator\": \"0\", \"denominator\": \"0\"},\n \"trigger\": {\n \"type\": \"VESTING_SCHEDULE_RELATIVE\",\n \"period\": {\n \"length\": cliff_month - months_of_vest_credit_on_trigger,\n \"type\": vesting_period_type,\n \"occurrences\": 1,\n \"day_of_month\": \"VESTING_START_DAY_OR_LAST_DAY_OF_MONTH\",\n },\n \"relative_to_condition_id\": vesting_start_condition_id,\n },\n \"next_condition_ids\": [\n f\"MONTH-{shares_start_vesting_in_month_n}-TO-{shares_start_vesting_in_month_n + 1}-ACCELERATED-\"\n \"AMT-VEST-PERIOD\",\n ],\n }\n ]\n )\n\n logger.debug(f\"Starting conditions:\\n{conditions}\")\n\n for i in range(shares_start_vesting_in_month_n, months_fully_vested + 1):\n\n logger.debug(f\"\\tCalculate vesting for month {i} - {i + 1}\")\n\n if i == shares_start_vesting_in_month_n:\n if shares_start_vesting_in_month_n > 0:\n relative_to_condition_id = \"PRE-CLIFF-VEST-PERIOD\"\n else:\n relative_to_condition_id = vesting_start_condition_id\n else:\n relative_to_condition_id = f\"MONTH-{i - 1}-TO-{i}-ACCELERATED-AMT-VEST-PERIOD\"\n\n logger.debug(f\"\\t\\tPrevious condition: {relative_to_condition_id}\")\n\n new_conditions = []\n\n if i < months_fully_vested:\n\n if i == shares_start_vesting_in_month_n:\n start_condition_id = f\"MONTH-{i}-TO-{i + 1}-ACCELERATED-AMT-VEST-PERIOD\"\n\n logger.debug(f\"\\t\\tI is {i}\")\n\n logger.debug(f\"\\t\\tPortion {i + months_of_vest_credit_on_trigger} / {end_month}\")\n new_conditions = [\n {\n \"id\": f\"MONTH-{i}-TO-{i + 1}-ACCELERATED-AMT-VEST-PERIOD\",\n \"description\": f\"Amount of shares that vest for single trigger acceleration on month {i} of \"\n f\"vesting schedule\",\n \"portion\": {\"numerator\": \"0\", \"denominator\": \"0\"},\n \"trigger\": {\n \"type\": \"VESTING_SCHEDULE_RELATIVE\",\n \"period\": {\n \"length\": 1,\n \"type\": vesting_period_type,\n \"occurrences\": 1,\n \"day_of_month\": \"VESTING_START_DAY_OR_LAST_DAY_OF_MONTH\",\n },\n \"relative_to_condition_id\": relative_to_condition_id,\n },\n \"next_condition_ids\": [\n f\"MONTH-{i + 1}-TO-{i + 2}-ACCELERATED-AMT-VEST-PERIOD\",\n f\"MONTH-{i}-TO-{i + 1}-ACCEL-VEST-AMOUNT\",\n ]\n if i < months_fully_vested - 1\n else [\n f\"POST-MONTH-{i + 1}-ACCELERATED-AMT-VEST-PERIOD\",\n f\"MONTH-{i}-TO-{i + 1}-ACCEL-VEST-AMOUNT\",\n ],\n },\n {\n \"id\": f\"MONTH-{i}-TO-{i + 1}-ACCEL-VEST-AMOUNT\",\n \"description\": f\"Holder is terminated during month {i} of vesting\",\n \"portion\": {\n \"numerator\": str(i + months_of_vest_credit_on_trigger),\n \"denominator\": str(end_month),\n },\n \"trigger\": {\"type\": \"VESTING_EVENT\"},\n \"next_condition_ids\": [],\n }\n if ocf_event_generator is None\n else ocf_event_generator(\n period_number=i,\n 
period_type=\"MONTHS\",\n on_or_after_fully_vested_cutoff=False,\n portion_numerator=i + months_of_vest_credit_on_trigger,\n portion_denominator=end_month,\n id=f\"MONTH-{i}-TO-{i + 1}-ACCEL-VEST-AMOUNT\",\n ),\n ]\n\n elif i == months_fully_vested:\n\n logger.debug(f\"\\t\\t💣 💣 Detected we are at month {months_fully_vested} - fully vested\")\n logger.debug(f\"\\t\\t\\tPortion {end_month} / {end_month}\")\n\n if i == shares_start_vesting_in_month_n:\n start_condition_id = f\"MONTH-{i}-AND-LATER-ACCEL-VEST-AMOUNT\"\n\n new_conditions = [\n {\n \"id\": f\"MONTH-{i}-AND-LATER-ACCEL-VEST-AMOUNT\",\n \"description\": f\"Holder is terminated on or after month #{i} of vesting\",\n \"portion\": {\"numerator\": str(end_month), \"denominator\": str(end_month)},\n \"trigger\": {\"type\": \"VESTING_EVENT\"},\n \"next_condition_ids\": [],\n }\n if ocf_event_generator is None\n else ocf_event_generator(\n period_number=i,\n period_type=\"MONTHS\",\n on_or_after_fully_vested_cutoff=True,\n portion_numerator=end_month,\n portion_denominator=end_month,\n id=f\"MONTH-{i}-AND-LATER-ACCEL-VEST-AMOUNT\",\n ),\n {\n \"id\": f\"POST-MONTH-{i}-ACCELERATED-AMT-VEST-PERIOD\",\n \"description\": f\"Accelerated vesting is fully vested on or after month {i} of vesting schedule\",\n \"portion\": {\"numerator\": \"0\", \"denominator\": \"0\"},\n \"trigger\": {\n \"type\": \"VESTING_SCHEDULE_RELATIVE\",\n \"period\": {\n \"length\": end_month - months_fully_vested,\n \"type\": vesting_period_type,\n \"occurrences\": 1,\n \"day_of_month\": \"VESTING_START_DAY_OR_LAST_DAY_OF_MONTH\",\n },\n \"relative_to_condition_id\": f\"MONTH-{i}-ACCEL-VEST-AMOUNT\",\n },\n \"next_condition_ids\": [f\"MONTH-{i}-AND-LATER-ACCEL-VEST-AMOUNT\"],\n },\n ]\n else:\n pass\n logger.debug(f\"\\t\\tNew conditions: {new_conditions}\")\n conditions.extend(new_conditions)\n\n return start_condition_id, conditions"
},
{
"identifier": "generate_vesting_condition_relative_time_based",
"path": "CE2OCF/ocf/generators/ocf_vesting_conditions.py",
"snippet": "def generate_vesting_condition_relative_time_based(\n relative_to_condition_id: str = \"\",\n condition_id: str | None = None,\n time_units: str | OcfPeriodTypeEnum = OcfPeriodTypeEnum.YEARS,\n time_unit_quantity: int = 1,\n time_period_repetition: int = 1,\n portion_numerator: int | str | None = None,\n portion_denominator: int | str | None = None,\n quantity: int | str | None = None,\n next_condition_ids: list[str] | None = None,\n vesting_day_of_month: OcfVestingDayOfMonthEnum = OcfVestingDayOfMonthEnum.VESTING_START_DAY_OR_LAST_DAY_OF_MONTH,\n remainder: bool | None = None,\n) -> dict:\n\n if next_condition_ids is None:\n next_condition_ids = []\n\n if not condition_id:\n condition_id = uuid.uuid4().__str__()\n\n if (portion_numerator is not None or portion_denominator is not None) and not (\n isinstance(portion_denominator, (str, int)) and isinstance(portion_numerator, (str, int))\n ):\n raise ValueError(\n \"If you are going to use a portion, you need to provide portion_numerator and portion_denominator\"\n )\n\n if quantity is not None and (portion_numerator or portion_denominator):\n raise ValueError(\n \"If you use quantity (fixed number of security units) do not provide portion values or vice-versa - \"\n f\"| provided quantity {quantity} portion_numerator {portion_numerator} | \"\n f\"portion_denominator: {portion_denominator}\"\n )\n\n condition: dict[str, Any] = {\n \"id\": condition_id,\n \"description\": f\"GD Autogenerated Time-Based Vesting Condition occurring every \"\n f\"{time_unit_quantity} {time_units}, \"\n f\"{time_period_repetition} times, after {relative_to_condition_id}\",\n \"trigger\": {\n \"type\": \"VESTING_SCHEDULE_RELATIVE\",\n \"period\": {\n \"length\": time_unit_quantity,\n \"type\": time_units,\n \"occurrences\": time_period_repetition,\n \"day_of_month\": vesting_day_of_month,\n },\n \"relative_to_condition_id\": relative_to_condition_id,\n },\n \"next_condition_ids\": next_condition_ids,\n }\n\n if quantity is not None:\n condition[\"quantity\"] = f\"{quantity}\"\n else:\n condition[\"portion\"] = {\n \"numerator\": f\"{portion_numerator}\",\n \"denominator\": f\"{portion_denominator}\",\n }\n\n if remainder is not None:\n condition[\"portion\"][\"remainder\"] = remainder\n\n return condition"
},
{
"identifier": "generate_vesting_start_condition",
"path": "CE2OCF/ocf/generators/ocf_vesting_conditions.py",
"snippet": "def generate_vesting_start_condition(\n next_condition_ids: list[str] | None = None,\n portion_numerator: int | None = None,\n portion_denominator: int | None = None,\n quantity: int | None = None,\n condition_id: str | None = None,\n remainder: bool | None = None,\n) -> dict:\n\n logger.debug(\"Function: generate_vesting_start_condition()\")\n logger.debug(\"Arguments:\")\n logger.debug(f\" next_condition_ids: {next_condition_ids}\")\n logger.debug(f\" portion_numerator: {portion_numerator}\")\n logger.debug(f\" portion_denominator: {portion_denominator}\")\n logger.debug(f\" quantity: {quantity}\")\n logger.debug(f\" condition_id: {condition_id}\")\n logger.debug(f\" remainder: {remainder}\")\n\n if (portion_numerator is not None or portion_denominator is not None) and not (\n isinstance(portion_denominator, int) and isinstance(portion_numerator, int)\n ):\n raise ValueError(\n \"If you are going to use a portion, you need to provide portion_numerator and portion_denominator\"\n )\n\n if quantity is not None and (portion_numerator or portion_denominator):\n raise ValueError(\n \"If you use quantity (fixed number of security units) do not provide portion values or vice-versa\"\n )\n\n if quantity == portion_numerator == portion_denominator is None:\n raise ValueError(\"You need to define either a portion or quantity based amount\")\n\n if next_condition_ids is None:\n next_condition_ids = []\n\n if not condition_id:\n condition_id = uuid.uuid4().__str__()\n\n condition: dict[str, Any] = {\n \"id\": condition_id,\n \"trigger\": {\"type\": \"VESTING_START_DATE\"},\n \"next_condition_ids\": next_condition_ids,\n }\n\n if quantity is not None:\n condition[\"quantity\"] = f\"{quantity}\"\n\n if portion_numerator and portion_denominator:\n condition[\"portion\"] = {\n \"numerator\": f\"{portion_numerator}\",\n \"denominator\": f\"{portion_denominator}\",\n }\n\n if remainder is not None:\n condition[\"portion\"][\"remainder\"] = remainder\n\n return condition"
},
{
"identifier": "cic_event_generator",
"path": "CE2OCF/ocf/generators/ocf_vesting_events.py",
"snippet": "def cic_event_generator(\n period_number: int = 0,\n period_type: str = \"MONTHS\",\n on_or_after_fully_vested_cutoff: bool = False,\n portion_numerator: int = 0,\n portion_denominator: int = 0,\n id: str = \"\",\n **kwargs,\n) -> dict:\n if on_or_after_fully_vested_cutoff:\n return generate_event_based_vesting_condition(\n condition_id=id,\n description=f\"There is a change in control on or after {period_type} {period_number} of vesting\",\n portion_denominator=portion_denominator,\n portion_numerator=portion_numerator,\n )\n else:\n return generate_event_based_vesting_condition(\n condition_id=id,\n description=f\"There is a change in control during month {period_number} of vesting\",\n portion_denominator=portion_denominator,\n portion_numerator=portion_numerator,\n )"
},
{
"identifier": "generate_change_in_control_event",
"path": "CE2OCF/ocf/generators/ocf_vesting_events.py",
"snippet": "def generate_change_in_control_event(\n vesting_schedule_id: str,\n cic_event_definition: CicEventDefinition,\n next_condition_ids: list[str] = [],\n) -> dict:\n\n cic_event_id = generate_cic_event_id(vesting_schedule_id)\n\n return generate_event_based_vesting_condition(\n condition_id=cic_event_id,\n **cic_event_definition,\n next_condition_ids=next_condition_ids,\n )"
},
{
"identifier": "generate_vesting_termination_event",
"path": "CE2OCF/ocf/generators/ocf_vesting_events.py",
"snippet": "def generate_vesting_termination_event(\n period_number: int = 0,\n period_type: str = \"MONTHS\",\n on_or_after_fully_vested_cutoff: bool = False,\n portion_numerator: int = 0,\n portion_denominator: int = 0,\n id: str = \"\",\n **kwargs,\n) -> dict:\n if on_or_after_fully_vested_cutoff:\n return generate_event_based_vesting_condition(\n condition_id=id,\n description=f\"Security holder terminated on or after {period_type} {period_number} of vesting\",\n portion_denominator=portion_denominator,\n portion_numerator=portion_numerator,\n )\n else:\n return generate_event_based_vesting_condition(\n condition_id=id,\n description=f\"Security holder terminated during month {period_number} of vesting\",\n portion_denominator=portion_denominator,\n portion_numerator=portion_numerator,\n )"
},
{
"identifier": "CicEventDefinition",
"path": "CE2OCF/types/dictionaries.py",
"snippet": "class CicEventDefinition(TypedDict):\n description: str\n remainder: bool\n portion_numerator: int\n portion_denominator: int"
},
{
"identifier": "TerminationDetails",
"path": "CE2OCF/types/dictionaries.py",
"snippet": "class TerminationDetails(TypedDict):\n time_based_expiration_details: Optional[TimeBasedExpirationDetails]\n termination_event_details: TerminationEventDetails"
},
{
"identifier": "DoubleTriggerTypesEnum",
"path": "CE2OCF/types/enums.py",
"snippet": "class DoubleTriggerTypesEnum(str, enum.Enum):\n NA = \"N/A\"\n TWENTY_FIVE_PERCENT_12_MONTHS = \"25% of unvested; Involuntary Termination within 12 months after CiC\"\n FIFTY_PERCENT_12_MONTHS = \"50% of unvested; Involuntary Termination within 12 months after CiC\"\n ONE_HUNDRED_PERCENT_12_MONTHS = \"100% of unvested; Involuntary Termination within 12 months after CiC\"\n TWENTY_FIVE_PERCENT_ANY_TIME = \"25% of unvested; Involuntary Termination any time after CiC\"\n FIFTY_PERCENT_ANY_TIME = \"50% of unvested; Involuntary Termination any time after CiC\"\n ONE_HUNDRED_PERCENT_ANY_TIME = \"100% of unvested; Involuntary Termination any time after CiC\"\n CUSTOM = \"Custom\""
},
{
"identifier": "OcfPeriodTypeEnum",
"path": "CE2OCF/types/enums.py",
"snippet": "class OcfPeriodTypeEnum(str, enum.Enum):\n DAYS = \"DAYS\"\n MONTHS = \"MONTHS\"\n YEARS = \"YEARS\""
},
{
"identifier": "SingleTriggerTypesEnum",
"path": "CE2OCF/types/enums.py",
"snippet": "class SingleTriggerTypesEnum(str, enum.Enum):\n NA = \"N/A\"\n SIX_MONTHS_ALL_TIMES = \"6 months; all times after CiC\"\n TWELVE_MONTHS_ALL_TIMES = \"12 months; all times after CiC\"\n TWENTY_FOUR_MONTHS_ALL_TIMES = \"24 months; all times after CiC\"\n ONE_HUNDRED_PERCENT_ALL_TIMES = \"100%; all times after CiC\"\n SIX_MONTHS_INVOLUNTARY_TERMINATION = \"6 months; Involuntary Termination\"\n TWELVE_MONTHS_INVOLUNTARY_TERMINATION = \"12 months; Involuntary Termination\"\n TWENTY_FOUR_MONTHS_INVOLUNTARY_TERMINATION = \"24 months; Involuntary Termination\"\n ONE_HUNDRED_PERCENT_INVOLUNTARY_TERMINATION = \"100%; Involuntary Termination\"\n CUSTOM = \"Custom\""
},
{
"identifier": "VestingTypesEnum",
"path": "CE2OCF/types/enums.py",
"snippet": "class VestingTypesEnum(str, enum.Enum):\n FOUR_YR_1_YR_CLIFF = \"4yr with 1yr Cliff\"\n FOUR_YR_NO_CLIFF = \"4yr with no Cliff\"\n FULLY_VESTED = \"Fully Vested\"\n CUSTOM = \"Custom\" # We're not going to support this via OCF"
},
{
"identifier": "logger",
"path": "CE2OCF/utils/log_utils.py",
"snippet": ""
}
] | from CE2OCF.datamap.loaders import (
load_cic_event_definition,
load_double_trigger_definitions,
load_single_trigger_definitions,
)
from CE2OCF.ocf.generators.ocf_id_generators import (
generate_accel_trigger_termination_event_id,
generate_cic_event_id,
generate_time_based_accel_expiration_event_id,
generate_vesting_start_id,
)
from CE2OCF.ocf.generators.ocf_vesting_conditions import (
generate_cliff_vesting_condition_id,
generate_event_based_vesting_condition,
generate_monthly_vesting_condition_id,
generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration,
generate_vesting_condition_relative_time_based,
generate_vesting_start_condition,
)
from CE2OCF.ocf.generators.ocf_vesting_events import (
cic_event_generator,
generate_change_in_control_event,
generate_vesting_termination_event,
)
from CE2OCF.types.dictionaries import (
CicEventDefinition,
TerminationDetails,
)
from CE2OCF.types.enums import (
DoubleTriggerTypesEnum,
OcfPeriodTypeEnum,
SingleTriggerTypesEnum,
VestingTypesEnum,
)
from CE2OCF.utils.log_utils import logger | 9,004 | end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=6,
ocf_event_generator=generate_vesting_termination_event,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.TWELVE_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=12,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.TWENTY_FOUR_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=24,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.SIX_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=6,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
else:
logger.debug("WARNING - Unexpected combination of acceleration and vesting...")
elif vesting_schedule_type == VestingTypesEnum.FULLY_VESTED:
# shouldn't be vesting conditions
pass
else:
logger.debug("WARNING - Unexpected combination of acceleration and vesting...")
pass
return start_condition_id, condition_ocf_objs
def generate_double_trigger_conditions_from_enumerations(
double_trigger_type: DoubleTriggerTypesEnum,
vesting_schedule_id: str,
cic_event_definition: CicEventDefinition | None = None,
double_trigger_termination_details: dict[str, TerminationDetails | None] | None = None,
) -> list[dict]:
if cic_event_definition is None:
cic_event_definition = load_cic_event_definition()
if double_trigger_termination_details is None:
double_trigger_termination_details = load_double_trigger_definitions()
condition_ocf_objs: list[dict] = []
if double_trigger_type not in double_trigger_termination_details:
raise ValueError(
f"Provided double trigger value ({double_trigger_type}) not supported in "
f"double_trigger_termination_details mapping object "
)
details = double_trigger_termination_details[double_trigger_type]
# If mapping table maps to None don't generate anything...
if details is None:
return condition_ocf_objs
cic_event_id = generate_cic_event_id(vesting_schedule_id, "Double")
time_based_expiration_details = details["time_based_expiration_details"]
time_based_expiration_event_id = (
None
if time_based_expiration_details is None
else generate_time_based_accel_expiration_event_id(vesting_schedule_id, "Double")
)
termination_event_details = details["termination_event_details"]
termination_event_id = generate_accel_trigger_termination_event_id(vesting_schedule_id, "Double")
# generate the cic event first and set next_condition_ids to include the expiration event
# if applicable, otherwise, just the termination event
condition_ocf_objs.append(
generate_change_in_control_event(
vesting_schedule_id=vesting_schedule_id,
cic_event_definition=cic_event_definition,
next_condition_ids=[
*([time_based_expiration_event_id] if time_based_expiration_event_id is not None else []),
termination_event_id,
]
if time_based_expiration_details is not None
else [termination_event_id],
)
)
# If there is a time-based expiration
if time_based_expiration_details is not None:
condition_ocf_objs.append(
| from __future__ import annotations
def generate_single_trigger_conditions_from_enumerations(
single_trigger_type: SingleTriggerTypesEnum | str,
vesting_schedule_type: VestingTypesEnum | str,
vesting_schedule_id: str,
single_trigger_termination_details: dict[str, CicEventDefinition | None] | None = None,
) -> tuple[str, list[dict]]:
"""
Generates required single trigger vesting conditions from our enums.
    :param single_trigger_type: type of single trigger acceleration to generate conditions for
    :param vesting_schedule_type: type of vesting schedule the acceleration applies to
    :param vesting_schedule_id: id of the vesting schedule the generated conditions belong to
    :param single_trigger_termination_details: mapping of single trigger type to its CicEventDefinition; loaded via
        load_single_trigger_definitions() when None
    :return: A tuple - element 0 is the start condition id of the generated schedule. Element 1 is the actual list of
        ocf objs.
"""
if single_trigger_termination_details is None:
single_trigger_termination_details = load_single_trigger_definitions()
if isinstance(single_trigger_type, str):
single_trigger_type = SingleTriggerTypesEnum(single_trigger_type)
if isinstance(vesting_schedule_type, str):
vesting_schedule_type = VestingTypesEnum(vesting_schedule_type)
condition_ocf_objs = []
start_condition_id = ""
if vesting_schedule_type == VestingTypesEnum.CUSTOM:
raise ValueError("Custom vesting schedule with single trigger acceleration not implemented")
if single_trigger_type == SingleTriggerTypesEnum.CUSTOM:
raise ValueError("Custom single trigger acceleration not implemented")
single_trigger_vals = single_trigger_termination_details[single_trigger_type]
assert single_trigger_vals is not None
if single_trigger_type == SingleTriggerTypesEnum.ONE_HUNDRED_PERCENT_INVOLUNTARY_TERMINATION:
logger.debug(
f"INFO - vesting_schedule_type arg {vesting_schedule_type} has no effect for {single_trigger_type} accel"
)
start_condition_id = generate_accel_trigger_termination_event_id(vesting_schedule_id, "Single")
condition_ocf_objs.append(
generate_event_based_vesting_condition(
condition_id=start_condition_id,
**single_trigger_vals,
)
)
elif single_trigger_type == SingleTriggerTypesEnum.ONE_HUNDRED_PERCENT_ALL_TIMES:
logger.debug(
f"INFO - vesting_schedule_type arg {vesting_schedule_type} has no effect for {single_trigger_type} accel"
)
start_condition_id = generate_cic_event_id(vesting_schedule_id, "Single")
condition_ocf_objs.append(
generate_event_based_vesting_condition(
condition_id=start_condition_id,
**single_trigger_vals,
)
)
else:
# for acceleration where you get credited extra months of vesting... the resulting output
# looks very different for a pure monthly schedule vs a schedule with a cliff.
if vesting_schedule_type == VestingTypesEnum.FOUR_YR_NO_CLIFF:
# These are CiC-based triggers
if single_trigger_type == SingleTriggerTypesEnum.TWENTY_FOUR_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=0,
months_of_vest_credit_on_trigger=24,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.TWELVE_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=0,
months_of_vest_credit_on_trigger=12,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.SIX_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=0,
months_of_vest_credit_on_trigger=6,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.SIX_MONTHS_INVOLUNTARY_TERMINATION:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=0,
months_of_vest_credit_on_trigger=6,
ocf_event_generator=generate_vesting_termination_event,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.TWELVE_MONTHS_INVOLUNTARY_TERMINATION:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=0,
months_of_vest_credit_on_trigger=12,
ocf_event_generator=generate_vesting_termination_event,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.TWENTY_FOUR_MONTHS_INVOLUNTARY_TERMINATION:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=0,
months_of_vest_credit_on_trigger=24,
ocf_event_generator=generate_vesting_termination_event,
)
condition_ocf_objs.extend(vest_cond_objs)
else:
logger.debug("WARNING - Unexpected combination of acceleration and vesting...")
elif vesting_schedule_type == VestingTypesEnum.FOUR_YR_1_YR_CLIFF:
# Since these are OVER the cliff, we can just add 12/48 or 24/48 portion
if single_trigger_type == SingleTriggerTypesEnum.TWENTY_FOUR_MONTHS_INVOLUNTARY_TERMINATION:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=24,
ocf_event_generator=generate_vesting_termination_event,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.TWELVE_MONTHS_INVOLUNTARY_TERMINATION:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=12,
ocf_event_generator=generate_vesting_termination_event,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.SIX_MONTHS_INVOLUNTARY_TERMINATION:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=6,
ocf_event_generator=generate_vesting_termination_event,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.TWELVE_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=12,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.TWENTY_FOUR_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=24,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.SIX_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=6,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
else:
logger.debug("WARNING - Unexpected combination of acceleration and vesting...")
elif vesting_schedule_type == VestingTypesEnum.FULLY_VESTED:
# shouldn't be vesting conditions
pass
else:
logger.debug("WARNING - Unexpected combination of acceleration and vesting...")
pass
return start_condition_id, condition_ocf_objs
def generate_double_trigger_conditions_from_enumerations(
double_trigger_type: DoubleTriggerTypesEnum,
vesting_schedule_id: str,
cic_event_definition: CicEventDefinition | None = None,
double_trigger_termination_details: dict[str, TerminationDetails | None] | None = None,
) -> list[dict]:
if cic_event_definition is None:
cic_event_definition = load_cic_event_definition()
if double_trigger_termination_details is None:
double_trigger_termination_details = load_double_trigger_definitions()
condition_ocf_objs: list[dict] = []
if double_trigger_type not in double_trigger_termination_details:
raise ValueError(
f"Provided double trigger value ({double_trigger_type}) not supported in "
f"double_trigger_termination_details mapping object "
)
details = double_trigger_termination_details[double_trigger_type]
# If mapping table maps to None don't generate anything...
if details is None:
return condition_ocf_objs
cic_event_id = generate_cic_event_id(vesting_schedule_id, "Double")
time_based_expiration_details = details["time_based_expiration_details"]
time_based_expiration_event_id = (
None
if time_based_expiration_details is None
else generate_time_based_accel_expiration_event_id(vesting_schedule_id, "Double")
)
termination_event_details = details["termination_event_details"]
termination_event_id = generate_accel_trigger_termination_event_id(vesting_schedule_id, "Double")
# generate the cic event first and set next_condition_ids to include the expiration event
# if applicable, otherwise, just the termination event
condition_ocf_objs.append(
generate_change_in_control_event(
vesting_schedule_id=vesting_schedule_id,
cic_event_definition=cic_event_definition,
next_condition_ids=[
*([time_based_expiration_event_id] if time_based_expiration_event_id is not None else []),
termination_event_id,
]
if time_based_expiration_details is not None
else [termination_event_id],
)
)
# If there is a time-based expiration
if time_based_expiration_details is not None:
condition_ocf_objs.append( | generate_vesting_condition_relative_time_based( | 11 | 2023-11-13 15:50:53+00:00 | 12k |
cyberark/ark-sdk-python | ark_sdk_python/models/actions/services/ark_dpa_exec_action_consts.py | [
{
"identifier": "ArkModel",
"path": "ark_sdk_python/models/ark_model.py",
"snippet": "class ArkModel(BaseModel):\n class Config:\n allow_population_by_field_name = True"
},
{
"identifier": "ArkServiceActionDefinition",
"path": "ark_sdk_python/models/actions/ark_service_action_definition.py",
"snippet": "class ArkServiceActionDefinition(ArkModel):\n action_name: str = Field(description='Action name to be used in the cli commands')\n schemas: Optional[Dict[str, Optional[Type[ArkModel]]]] = Field(description='Schemas for different cli actions for the definition')\n defaults: Optional[Dict[str, Dict[str, Any]]] = Field(description='Defaults for the action schemas parameters')\n async_actions: Optional[List[str]] = Field(description='List of async actions as part of the schemas')\n subactions: Optional[List['ArkServiceActionDefinition']] = Field(description='Subactions to this action')"
},
{
"identifier": "ArkDPACommitPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_commit_policies.py",
"snippet": "class ArkDPACommitPolicies(ArkModel):\n names: Optional[List[str]] = Field(\n description='Policy names to commit from the workspace to the remote, if not given, choices will be prompted'\n )\n all: bool = Field(description='Whether to commit all locally edited policies', default=False)"
},
{
"identifier": "ArkDPAEditPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_edit_policies.py",
"snippet": "class ArkDPAEditPolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policies to edit from the workspace, if not given, choices will be prompted')"
},
{
"identifier": "ArkDPAGetPoliciesStatus",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_get_policies_status.py",
"snippet": "class ArkDPAGetPoliciesStatus(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to show status on, if not given, shows status on all policies')"
},
{
"identifier": "ArkDPALoadPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_load_policies.py",
"snippet": "class ArkDPALoadPolicies(ArkModel):\n override: bool = Field(description='Whether to override existing policies', default=False)"
},
{
"identifier": "ArkDPAPoliciesDiff",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_policies_diff.py",
"snippet": "class ArkDPAPoliciesDiff(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to show diff on, if not given, shows diff on all policies')\n unified: bool = Field(description='Show all diffs together', default=False)"
},
{
"identifier": "ArkDPARemovePolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_remove_policies.py",
"snippet": "class ArkDPARemovePolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policies to remove from the workspace, if not given, choices will be prompted')"
},
{
"identifier": "ArkDPAResetPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_reset_policies.py",
"snippet": "class ArkDPAResetPolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to reset on the workspace, if not given, all policies are resetted')\n all: bool = Field(description='Whether to reset all locally edited policies', default=False)"
},
{
"identifier": "ArkDPAViewPolicies",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_view_policies.py",
"snippet": "class ArkDPAViewPolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to view from the workspace, if not given, choices will be prompted')\n unified: bool = Field(description='Show all requested policies together', default=False)"
},
{
"identifier": "ArkDPADBGeneratePolicy",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/db/ark_dpa_db_generate_policy.py",
"snippet": "class ArkDPADBGeneratePolicy(ArkDPABaseGeneratePolicy):\n providers: Optional[Set[Literal['MySQL', 'MariaDB', 'Postgres', 'MSSQL', 'Oracle']]] = Field(\n description='Providers to generate the policy for'\n )"
},
{
"identifier": "ArkDPAVMGeneratePolicy",
"path": "ark_sdk_python/models/cli_services/dpa/policies_editor/vm/ark_dpa_vm_generate_policy.py",
"snippet": "class ArkDPAVMGeneratePolicy(ArkDPABaseGeneratePolicy):\n providers: Optional[Set[Literal['AWS', 'Azure', 'OnPrem']]] = Field(description='Providers to generate the policy for')\n protocols: Optional[Set[Literal['ssh', 'rdp']]] = Field(description='Protocols to generate the policy for')"
},
{
"identifier": "ArkDPACreateCertificate",
"path": "ark_sdk_python/models/services/dpa/certificates/ark_dpa_certificates_certificate.py",
"snippet": "class ArkDPACreateCertificate(ArkDPACreateCertificateBase):\n file: FilePath = Field(description='Path to a file with the certificate body')"
},
{
"identifier": "ArkDPADeleteCertificate",
"path": "ark_sdk_python/models/services/dpa/certificates/ark_dpa_certificates_delete_certificate.py",
"snippet": "class ArkDPADeleteCertificate(ArkModel):\n certificate_id: str = Field(description='ID of the certificate to delete', min_length=1)"
},
{
"identifier": "ArkDPACertificatesFilter",
"path": "ark_sdk_python/models/services/dpa/certificates/ark_dpa_certificates_filter.py",
"snippet": "class ArkDPACertificatesFilter(ArkModel):\n domain_name: Optional[str] = Field(default=None, description='Filter by domain name')\n cert_name: Optional[str] = Field(default=None, description='Filter by certificate name')"
},
{
"identifier": "ArkDPAGetCertificate",
"path": "ark_sdk_python/models/services/dpa/certificates/ark_dpa_certificates_get_certificate.py",
"snippet": "class ArkDPAGetCertificate(ArkModel):\n certificate_id: str = Field(description='ID of the certificate', min_length=1)"
},
{
"identifier": "ArkDPAUpdateCertificate",
"path": "ark_sdk_python/models/services/dpa/certificates/ark_dpa_certificates_update_certificate.py",
"snippet": "class ArkDPAUpdateCertificate(ArkDPACreateCertificateBase):\n certificate_id: str = Field(description='ID of the certificate to update', min_length=1)\n file: FilePath = Field(description='Path to a file with the certificate body')"
},
{
"identifier": "ArkDPADBMysqlExecution",
"path": "ark_sdk_python/models/services/dpa/db/ark_dpa_db_mysql_execution.py",
"snippet": "class ArkDPADBMysqlExecution(ArkDPADBBaseExecution):\n mysql_path: str = Field(description='Path to the psql executable', default='mysql')"
},
{
"identifier": "ArkDPADBOracleGenerateAssets",
"path": "ark_sdk_python/models/services/dpa/db/ark_dpa_db_oracle_generate_assets.py",
"snippet": "class ArkDPADBOracleGenerateAssets(ArkDPADBBaseGenerateAssets):\n folder: str = Field(description='Where to output the assets')\n unzip: bool = Field(description='Whether to save zipped or not', default=True)"
},
{
"identifier": "ArkDPADBPsqlExecution",
"path": "ark_sdk_python/models/services/dpa/db/ark_dpa_db_psql_execution.py",
"snippet": "class ArkDPADBPsqlExecution(ArkDPADBBaseExecution):\n psql_path: str = Field(description='Path to the psql executable', default='psql')"
},
{
"identifier": "ArkDPAK8SGenerateKubeConfig",
"path": "ark_sdk_python/models/services/dpa/k8s/ark_dpa_k8s_generate_kubeconfig.py",
"snippet": "class ArkDPAK8SGenerateKubeConfig(ArkModel):\n folder: str = Field(description='Output folder to download the kube config file', default=None)"
},
{
"identifier": "ArkDPADeletePolicy",
"path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_delete_policy.py",
"snippet": "class ArkDPADeletePolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to delete')\n policy_name: Optional[str] = Field(description='Policy name to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values"
},
{
"identifier": "ArkDPAGetPolicy",
"path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_get_policy.py",
"snippet": "class ArkDPAGetPolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to get')\n policy_name: Optional[str] = Field(description='Policy name to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values"
},
{
"identifier": "ArkDPAUpdatePolicyStatus",
"path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_update_policy_status.py",
"snippet": "class ArkDPAUpdatePolicyStatus(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to update the status for')\n policy_name: Optional[str] = Field(description='Policy name to update the status for')\n status: ArkDPARuleStatus = Field(description='New status to update')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values"
},
{
"identifier": "ArkDPADBAddPolicy",
"path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_add_policy.py",
"snippet": "class ArkDPADBAddPolicy(ArkDPABaseAddPolicy):\n providers_tags: List[str] = Field(description='Policy tags to use as filters for the assets in the rules', default_factory=list)\n providers_data: Optional[ArkDPADBProvidersData] = Field(\n description='Policy providers data containing database assets of different types'\n )\n user_access_rules: Optional[List[ArkDPADBAuthorizationRule]] = Field(\n description='Authorization rules of the policy describing how and who can access the assets'\n )"
},
{
"identifier": "ArkDPADBPoliciesFilter",
"path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_policies_filter.py",
"snippet": "class ArkDPADBPoliciesFilter(ArkDPABasePoliciesFilter):\n providers: Optional[List[ArkWorkspaceType]] = Field(description='Filter by policies with given database providers')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers')\n def validate_providers(cls, val):\n if val is not None:\n for plat in val:\n if ArkWorkspaceType(plat) not in [\n ArkWorkspaceType.MYSQL,\n ArkWorkspaceType.MARIADB,\n ArkWorkspaceType.POSTGRES,\n ArkWorkspaceType.MSSQL,\n ArkWorkspaceType.ORACLE,\n ]:\n raise ValueError('Invalid Database Type')\n return val"
},
{
"identifier": "ArkDPADBUpdatePolicy",
"path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_update_policy.py",
"snippet": "class ArkDPADBUpdatePolicy(ArkDPABaseUpdatePolicy):\n providers_tags: Optional[List[str]] = Field(description='Policy tags to use as filters for the assets in the rules')\n providers_data: Optional[ArkDPADBProvidersData] = Field(\n description='Policy providers data containing database assets of different types'\n )\n user_access_rules: Optional[List[ArkDPADBAuthorizationRule]] = Field(\n description='Authorization rules of the policy describing how and who can access the assets'\n )"
},
{
"identifier": "ArkDPAVMAddPolicy",
"path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_add_policy.py",
"snippet": "class ArkDPAVMAddPolicy(ArkDPABaseAddPolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(\n description='Workspaces / cloud providers data per type of cloud provider, '\n 'for example for AWS, how to filter ec2 instances to connect to'\n )\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(\n description='Rules describing how and who will be able to connect to the target instances filtered by the cloud providers'\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val"
},
{
"identifier": "ArkDPAVMPoliciesFilter",
"path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_policies_filter.py",
"snippet": "class ArkDPAVMPoliciesFilter(ArkDPABasePoliciesFilter):\n providers: Optional[List[ArkWorkspaceType]] = Field(description='Filter by policies with given cloud providers')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers')\n def validate_providers(cls, val):\n if val is not None:\n for plat in val:\n if ArkWorkspaceType(plat) not in [\n ArkWorkspaceType.AWS,\n ArkWorkspaceType.AZURE,\n ArkWorkspaceType.GCP,\n ArkWorkspaceType.ONPREM,\n ]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val"
},
{
"identifier": "ArkDPAVMUpdatePolicy",
"path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_update_policy.py",
"snippet": "class ArkDPAVMUpdatePolicy(ArkDPABaseUpdatePolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(description='New cloud providers to update')\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(description='New access rules to update')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val"
},
{
"identifier": "ArkDPADBAddSecret",
"path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_add_secret.py",
"snippet": "class ArkDPADBAddSecret(ArkModel):\n secret_name: str = Field(description='Name of the secret')\n description: str = Field(description='Description about the secret', default='')\n purpose: str = Field(description='Purpose of the secret', default='')\n secret_type: ArkDPADBSecretType = Field(description='Type of the secret')\n store_type: Optional[ArkDPADBStoreType] = Field(\n description='Store type of the secret of the secret, will be deduced by the secret type if not given'\n )\n tags: List[ArkDPADBTag] = Field(description='Tags of the secret', default_factory=list)\n\n # Username Password Secret Type\n username: Optional[str] = Field(description='Name or id of the user for username_password type')\n password: Optional[SecretStr] = Field(description='Password of the user for username_password type')\n\n # PAM Account Secret Type\n pam_safe: Optional[str] = Field(description='Safe of the account for pam_account type')\n pam_account_name: Optional[str] = Field(description='Account name for pam_account type')\n\n class Config:\n json_encoders = {SecretStr: lambda v: v.get_secret_value() if v else None}"
},
{
"identifier": "ArkDPADBDeleteSecret",
"path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_delete_secret.py",
"snippet": "class ArkDPADBDeleteSecret(ArkModel):\n secret_id: Optional[str] = Field(description='ID of the secret to delete')\n secret_name: Optional[str] = Field(description='Name of the secret to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'secret_id' not in values and 'secret_name' not in values:\n raise ValueError('Either secret id or secret name needs to be provided')\n return values"
},
{
"identifier": "ArkDPADBDisableSecret",
"path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_disable_secret.py",
"snippet": "class ArkDPADBDisableSecret(ArkModel):\n secret_id: Optional[str] = Field(description='ID of the secret to disable')\n secret_name: Optional[str] = Field(description='Name of the secret to disable')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'secret_id' not in values and 'secret_name' not in values:\n raise ValueError('Either secret id or secret name needs to be provided')\n return values"
},
{
"identifier": "ArkDPADBEnableSecret",
"path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_enable_secret.py",
"snippet": "class ArkDPADBEnableSecret(ArkModel):\n secret_id: Optional[str] = Field(description='ID of the secret to enable')\n secret_name: Optional[str] = Field(description='Name of the secret to enable')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'secret_id' not in values and 'secret_name' not in values:\n raise ValueError('Either secret id or secret name needs to be provided')\n return values"
},
{
"identifier": "ArkDPADBGetSecret",
"path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_get_secret.py",
"snippet": "class ArkDPADBGetSecret(ArkModel):\n secret_id: Optional[str] = Field(description='ID of the secret to get')\n secret_name: Optional[str] = Field(description='Name of the secret to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'secret_id' not in values and 'secret_name' not in values:\n raise ValueError('Either secret id or secret name needs to be provided')\n return values"
},
{
"identifier": "ArkDPADBSecretsFilter",
"path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_secrets_filter.py",
"snippet": "class ArkDPADBSecretsFilter(ArkModel):\n secret_name: Optional[str] = Field(description='Filter by secret name')\n secret_type: Optional[ArkDPADBSecretType] = Field(description='Filter by type')\n store_type: Optional[ArkDPADBStoreType] = Field(description='Filter by store type')\n is_active: Optional[bool] = Field(description='Filter by if secret is active')\n tags: Optional[List[ArkDPADBTag]] = Field(description='Filter by tags')"
},
{
"identifier": "ArkDPADBUpdateSecret",
"path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_update_secret.py",
"snippet": "class ArkDPADBUpdateSecret(ArkModel):\n secret_id: Optional[str] = Field(description='Secret id to update')\n secret_name: Optional[str] = Field(description='Name of the secret to update')\n new_secret_name: Optional[str] = Field(description='New secret name to update to')\n description: Optional[str] = Field(description='Description about the secret to update')\n purpose: Optional[str] = Field(description='Purpose of the secret to update')\n tags: Optional[List[ArkDPADBTag]] = Field(description='Tags of the secret to change to')\n\n # Username Password Secret Type\n username: Optional[str] = Field(description='Name or id of the user for username_password type')\n password: Optional[SecretStr] = Field(description='Password of the user for username_password type')\n\n # PAM Account Secret Type\n pam_safe: Optional[str] = Field(description='Safe of the account for pam_account type')\n pam_account_name: Optional[str] = Field(description='Account name for pam_account type')\n\n class Config:\n json_encoders = {SecretStr: lambda v: v.get_secret_value() if v else None}\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'secret_id' not in values and 'secret_name' not in values:\n raise ValueError('Either secret id or secret name needs to be provided')\n return values"
},
{
"identifier": "ArkDPASSOGetShortLivedClientCertificate",
"path": "ark_sdk_python/models/services/dpa/sso/ark_dpa_sso_get_short_lived_client_certificate.py",
"snippet": "class ArkDPASSOGetShortLivedClientCertificate(ArkModel):\n allow_caching: bool = Field(description='Allow short lived token caching', default=False)\n folder: Optional[str] = Field(description='Output folder to write the key / certificate to. Required if format is File')\n output_format: ArkDPASSOShortLiveClientCertificateFormat = Field(\n description='The output format of the key / ' 'certificate. i.e. File, Raw, Base64',\n default=ArkDPASSOShortLiveClientCertificateFormat.FILE,\n )"
},
{
"identifier": "ArkDPASSOGetShortLivedOracleWallet",
"path": "ark_sdk_python/models/services/dpa/sso/ark_dpa_sso_get_short_lived_oracle_wallet.py",
"snippet": "class ArkDPASSOGetShortLivedOracleWallet(ArkModel):\n allow_caching: bool = Field(description='Allow short lived token caching', default=False)\n unzip_wallet: bool = Field(description='Whether to save zipped or not', default=True)\n folder: str = Field(description='Output folder to write the wallet to')"
},
{
"identifier": "ArkDPASSOGetShortLivedPassword",
"path": "ark_sdk_python/models/services/dpa/sso/ark_dpa_sso_get_short_lived_password.py",
"snippet": "class ArkDPASSOGetShortLivedPassword(ArkModel):\n allow_caching: bool = Field(description='Allow short lived token caching', default=False)"
},
{
"identifier": "ArkDPASSOGetShortLivedRDPFile",
"path": "ark_sdk_python/models/services/dpa/sso/ark_dpa_sso_get_short_lived_rdp_file.py",
"snippet": "class ArkDPASSOGetShortLivedRDPFile(ArkModel):\n allow_caching: bool = Field(description='Allow short lived token caching', default=True)\n folder: str = Field(description='Output folder to write the rdp file to')\n target_address: str = Field(description='Address of the Windows target machine')\n target_domain: Optional[str] = Field(description='Domain of the Windows target machine')"
},
{
"identifier": "ArkDPADBAddDatabase",
"path": "ark_sdk_python/models/services/dpa/workspaces/db/ark_dpa_db_add_database.py",
"snippet": "class ArkDPADBAddDatabase(ArkCamelizedModel):\n name: str = Field(description='Name of the database, often referenced in policies and other APIs')\n network_name: str = Field(description='Name of the network the database resides in, defaulted to on premises', default='ON-PREMISE')\n platform: ArkWorkspaceType = Field(\n description='Platform of the database, as in, where it resides, defaulted to on premises', default=ArkWorkspaceType.ONPREM\n )\n services: Optional[List[str]] = Field(description='Services related to the database, most commonly used with oracle')\n domain_controller_name: Optional[str] = Field(description='Domain controller name associated to this database')\n domain_controller_netbios: Optional[str] = Field(description='Domain controller netbios associated to this database')\n provider_engine: ArkDPADBDatabaseEngineType = Field(\n description='Provider engine, will be later deduced to the identifer of the provider'\n )\n enable_certificate_validation: bool = Field(description='Whether to enable and enforce certificate validation', default=True)\n certificate: Optional[str] = Field(description='Certificate id used for this database that resides in the certificates service')\n read_write_endpoint: str = Field(description='Read write endpoint of the database')\n read_only_endpoint: Optional[str] = Field(description='Optionally, a read only endpoint of the database')\n port: Optional[int] = Field(description='Port of the database, if not given, the default one will be used')\n secret_id: Optional[str] = Field(description='Secret identifier stored in the secret service related to this database')\n tags: Optional[List[ArkDPADBTag]] = Field(description='Tags for the database')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('platform')\n def validate_workspace_type(cls, val):\n if val and ArkWorkspaceType(val) not in [\n ArkWorkspaceType.AWS,\n ArkWorkspaceType.AZURE,\n ArkWorkspaceType.GCP,\n ArkWorkspaceType.ONPREM,\n ]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val"
},
{
"identifier": "ArkDPADBDatabasesFilter",
"path": "ark_sdk_python/models/services/dpa/workspaces/db/ark_dpa_db_databases_filter.py",
"snippet": "class ArkDPADBDatabasesFilter(ArkModel):\n name: Optional[str] = Field(description='Name of the database to filter on')\n provider_family: Optional[ArkDPADBDatabaseFamilyType] = Field(description='List filter by family')\n provider_engine: Optional[ArkDPADBDatabaseEngineType] = Field(description='List filter by engine')\n provider_workspace: Optional[ArkDPADBDatabaseWorkspaceType] = Field(description='List filter by workspace')\n tags: Optional[List[ArkDPADBTag]] = Field(description='List filter by tags')\n db_warnings_filter: Optional[ArkDPADBWarning] = Field(description='Filter by databases who are with warnings / incomplete')"
},
{
"identifier": "ArkDPADBDeleteDatabase",
"path": "ark_sdk_python/models/services/dpa/workspaces/db/ark_dpa_db_delete_database.py",
"snippet": "class ArkDPADBDeleteDatabase(ArkCamelizedModel):\n id: Optional[int] = Field(description='Database id to delete')\n name: Optional[str] = Field(description='Database name to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'id' not in values and 'name' not in values:\n raise ValueError('Either id or name needs to be provided')\n return values"
},
{
"identifier": "ArkDPADBGetDatabase",
"path": "ark_sdk_python/models/services/dpa/workspaces/db/ark_dpa_db_get_database.py",
"snippet": "class ArkDPADBGetDatabase(ArkCamelizedModel):\n id: Optional[int] = Field(description='Database id to get')\n name: Optional[str] = Field(description='Database name to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'id' not in values and 'name' not in values:\n raise ValueError('Either id or name needs to be provided')\n return values"
},
{
"identifier": "ArkDPADBUpdateDatabase",
"path": "ark_sdk_python/models/services/dpa/workspaces/db/ark_dpa_db_update_database.py",
"snippet": "class ArkDPADBUpdateDatabase(ArkCamelizedModel):\n id: Optional[int] = Field(description='Database id to update')\n name: Optional[str] = Field(description='Database name to update')\n new_name: Optional[str] = Field(description='New name for the database')\n network_name: Optional[str] = Field(description='Name of the network the database resides in', default='ON-PREMISE')\n platform: Optional[ArkWorkspaceType] = Field(description='Platform of the database, as in, where it resides')\n services: Optional[List[str]] = Field(description='Services related to the database, most commonly used with oracle')\n domain_controller_name: Optional[str] = Field(description='Domain controller name associated to this database')\n domain_controller_netbios: Optional[str] = Field(description='Domain controller netbios associated to this database')\n provider_engine: Optional[ArkDPADBDatabaseEngineType] = Field(\n description='Provider engine, will be later deduced to the identifer of the provider'\n )\n enable_certificate_validation: bool = Field(description='Whether to enable and enforce certificate validation', default=True)\n certificate: Optional[str] = Field(description='Certificate id used for this database that resides in the certificates service')\n read_write_endpoint: Optional[str] = Field(description='Read write endpoint of the database')\n read_only_endpoint: Optional[str] = Field(description='Optionally, a read only endpoint of the database')\n port: Optional[int] = Field(description='Port of the database, if not given, the default one will be used')\n secret_id: Optional[str] = Field(description='Secret identifier stored in the secret service related to this database')\n tags: Optional[List[ArkDPADBTag]] = Field(description='Tags for the database')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'id' not in values and 'name' not in values:\n raise ValueError('Either id or name needs to be provided')\n return values\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('platform')\n def validate_workspace_type(cls, val):\n if val and ArkWorkspaceType(val) not in [\n ArkWorkspaceType.AWS,\n ArkWorkspaceType.AZURE,\n ArkWorkspaceType.GCP,\n ArkWorkspaceType.ONPREM,\n ]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val"
}
] | from typing import Dict, Final, Optional, Type
from ark_sdk_python.models import ArkModel
from ark_sdk_python.models.actions.ark_service_action_definition import ArkServiceActionDefinition
from ark_sdk_python.models.cli_services.dpa.policies_editor.common import (
ArkDPACommitPolicies,
ArkDPAEditPolicies,
ArkDPAGetPoliciesStatus,
ArkDPALoadPolicies,
ArkDPAPoliciesDiff,
ArkDPARemovePolicies,
ArkDPAResetPolicies,
ArkDPAViewPolicies,
)
from ark_sdk_python.models.cli_services.dpa.policies_editor.db import ArkDPADBGeneratePolicy
from ark_sdk_python.models.cli_services.dpa.policies_editor.vm import ArkDPAVMGeneratePolicy
from ark_sdk_python.models.services.dpa.certificates import (
ArkDPACertificatesFilter,
ArkDPACreateCertificate,
ArkDPADeleteCertificate,
ArkDPAGetCertificate,
ArkDPAUpdateCertificate,
)
from ark_sdk_python.models.services.dpa.db import ArkDPADBMysqlExecution, ArkDPADBOracleGenerateAssets, ArkDPADBPsqlExecution
from ark_sdk_python.models.services.dpa.k8s.ark_dpa_k8s_generate_kubeconfig import ArkDPAK8SGenerateKubeConfig
from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPAUpdatePolicyStatus
from ark_sdk_python.models.services.dpa.policies.db import ArkDPADBAddPolicy, ArkDPADBPoliciesFilter, ArkDPADBUpdatePolicy
from ark_sdk_python.models.services.dpa.policies.vm import ArkDPAVMAddPolicy, ArkDPAVMPoliciesFilter, ArkDPAVMUpdatePolicy
from ark_sdk_python.models.services.dpa.secrets.db import (
ArkDPADBAddSecret,
ArkDPADBDeleteSecret,
ArkDPADBDisableSecret,
ArkDPADBEnableSecret,
ArkDPADBGetSecret,
ArkDPADBSecretsFilter,
ArkDPADBUpdateSecret,
)
from ark_sdk_python.models.services.dpa.sso import (
ArkDPASSOGetShortLivedClientCertificate,
ArkDPASSOGetShortLivedOracleWallet,
ArkDPASSOGetShortLivedPassword,
ArkDPASSOGetShortLivedRDPFile,
)
from ark_sdk_python.models.services.dpa.workspaces.db import (
ArkDPADBAddDatabase,
ArkDPADBDatabasesFilter,
ArkDPADBDeleteDatabase,
ArkDPADBGetDatabase,
ArkDPADBUpdateDatabase,
) | 7,574 |
WORKSPACES_DB_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
'add-database': ArkDPADBAddDatabase,
'delete-database': ArkDPADBDeleteDatabase,
'update-database': ArkDPADBUpdateDatabase,
'list-databases': None,
'list-databases-by': ArkDPADBDatabasesFilter,
'database': ArkDPADBGetDatabase,
'databases-stats': None,
}
WORKSPACES_DB_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
action_name='db', schemas=WORKSPACES_DB_ACTION_TO_SCHEMA_MAP
)
WORKSPACES_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
action_name='workspaces', subactions=[WORKSPACES_DB_ACTION]
)
POLICIES_VM_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
'add-policy': ArkDPAVMAddPolicy,
'delete-policy': ArkDPADeletePolicy,
'update-policy': ArkDPAVMUpdatePolicy,
'update-policy-status': ArkDPAUpdatePolicyStatus,
'policy': ArkDPAGetPolicy,
'list-policies': None,
'list-policies-by': ArkDPAVMPoliciesFilter,
'policies-stats': None,
}
POLICIES_VM_EDITOR_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
'load-policies': ArkDPALoadPolicies,
'generate-policy': ArkDPAVMGeneratePolicy,
'edit-policies': ArkDPAEditPolicies,
'remove-policies': ArkDPARemovePolicies,
'view-policies': ArkDPAViewPolicies,
'reset-policies': ArkDPAResetPolicies,
'policies-diff': ArkDPAPoliciesDiff,
'policies-status': ArkDPAGetPoliciesStatus,
'commit-policies': ArkDPACommitPolicies,
}
POLICIES_VM_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
action_name='vm',
schemas=POLICIES_VM_ACTION_TO_SCHEMA_MAP,
subactions=[ArkServiceActionDefinition(action_name='editor', schemas=POLICIES_VM_EDITOR_ACTION_TO_SCHEMA_MAP)],
)
POLICIES_DB_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
'add-policy': ArkDPADBAddPolicy,
'delete-policy': ArkDPADeletePolicy,
'update-policy': ArkDPADBUpdatePolicy,
'update-policy-status': ArkDPAUpdatePolicyStatus,
'policy': ArkDPAGetPolicy,
'list-policies': None,
|
WORKSPACES_DB_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
'add-database': ArkDPADBAddDatabase,
'delete-database': ArkDPADBDeleteDatabase,
'update-database': ArkDPADBUpdateDatabase,
'list-databases': None,
'list-databases-by': ArkDPADBDatabasesFilter,
'database': ArkDPADBGetDatabase,
'databases-stats': None,
}
WORKSPACES_DB_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
action_name='db', schemas=WORKSPACES_DB_ACTION_TO_SCHEMA_MAP
)
WORKSPACES_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
action_name='workspaces', subactions=[WORKSPACES_DB_ACTION]
)
POLICIES_VM_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
'add-policy': ArkDPAVMAddPolicy,
'delete-policy': ArkDPADeletePolicy,
'update-policy': ArkDPAVMUpdatePolicy,
'update-policy-status': ArkDPAUpdatePolicyStatus,
'policy': ArkDPAGetPolicy,
'list-policies': None,
'list-policies-by': ArkDPAVMPoliciesFilter,
'policies-stats': None,
}
POLICIES_VM_EDITOR_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
'load-policies': ArkDPALoadPolicies,
'generate-policy': ArkDPAVMGeneratePolicy,
'edit-policies': ArkDPAEditPolicies,
'remove-policies': ArkDPARemovePolicies,
'view-policies': ArkDPAViewPolicies,
'reset-policies': ArkDPAResetPolicies,
'policies-diff': ArkDPAPoliciesDiff,
'policies-status': ArkDPAGetPoliciesStatus,
'commit-policies': ArkDPACommitPolicies,
}
POLICIES_VM_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
action_name='vm',
schemas=POLICIES_VM_ACTION_TO_SCHEMA_MAP,
subactions=[ArkServiceActionDefinition(action_name='editor', schemas=POLICIES_VM_EDITOR_ACTION_TO_SCHEMA_MAP)],
)
POLICIES_DB_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
'add-policy': ArkDPADBAddPolicy,
'delete-policy': ArkDPADeletePolicy,
'update-policy': ArkDPADBUpdatePolicy,
'update-policy-status': ArkDPAUpdatePolicyStatus,
'policy': ArkDPAGetPolicy,
'list-policies': None, | 'list-policies-by': ArkDPADBPoliciesFilter, | 25 | 2023-11-13 09:24:31+00:00 | 12k |
mohenghui/detectAuto_v8 | ultralytics/models/yolo/detect/predict.py | [
{
"identifier": "BasePredictor",
"path": "ultralytics/engine/predictor.py",
"snippet": "class BasePredictor:\n \"\"\"\n BasePredictor.\n\n A base class for creating predictors.\n\n Attributes:\n args (SimpleNamespace): Configuration for the predictor.\n save_dir (Path): Directory to save results.\n done_warmup (bool): Whether the predictor has finished setup.\n model (nn.Module): Model used for prediction.\n data (dict): Data configuration.\n device (torch.device): Device used for prediction.\n dataset (Dataset): Dataset used for prediction.\n vid_path (str): Path to video file.\n vid_writer (cv2.VideoWriter): Video writer for saving video output.\n data_path (str): Path to data.\n \"\"\"\n\n def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):\n \"\"\"\n Initializes the BasePredictor class.\n\n Args:\n cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG.\n overrides (dict, optional): Configuration overrides. Defaults to None.\n \"\"\"\n self.args = get_cfg(cfg, overrides)\n self.save_dir = get_save_dir(self.args)\n if self.args.conf is None:\n self.args.conf = 0.25 # default conf=0.25\n self.done_warmup = False\n if self.args.show:\n self.args.show = check_imshow(warn=True)\n\n # Usable if setup is done\n self.model = None\n self.data = self.args.data # data_dict\n self.imgsz = None\n self.device = None\n self.dataset = None\n self.vid_path, self.vid_writer = None, None\n self.plotted_img = None\n self.data_path = None\n self.source_type = None\n self.batch = None\n self.results = None\n self.transforms = None\n self.callbacks = _callbacks or callbacks.get_default_callbacks()\n self.txt_path = None\n self._lock = threading.Lock() # for automatic thread-safe inference\n callbacks.add_integration_callbacks(self)\n\n def preprocess(self, im):\n \"\"\"\n Prepares input image before inference.\n\n Args:\n im (torch.Tensor | List(np.ndarray)): BCHW for tensor, [(HWC) x B] for list.\n \"\"\"\n not_tensor = not isinstance(im, torch.Tensor)\n if not_tensor:\n im = np.stack(self.pre_transform(im))\n im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW, (n, 3, h, w)\n im = np.ascontiguousarray(im) # contiguous\n im = torch.from_numpy(im)\n\n im = im.to(self.device)\n im = im.half() if self.model.fp16 else im.float() # uint8 to fp16/32\n if not_tensor:\n im /= 255 # 0 - 255 to 0.0 - 1.0\n return im\n\n def inference(self, im, *args, **kwargs):\n \"\"\"Runs inference on a given image using the specified model and arguments.\"\"\"\n visualize = increment_path(self.save_dir / Path(self.batch[0][0]).stem,\n mkdir=True) if self.args.visualize and (not self.source_type.tensor) else False\n return self.model(im, augment=self.args.augment, visualize=visualize)\n\n def pre_transform(self, im):\n \"\"\"\n Pre-transform input image before inference.\n\n Args:\n im (List(np.ndarray)): (N, 3, h, w) for tensor, [(h, w, 3) x N] for list.\n\n Returns:\n (list): A list of transformed images.\n \"\"\"\n same_shapes = all(x.shape == im[0].shape for x in im)\n letterbox = LetterBox(self.imgsz, auto=same_shapes and self.model.pt, stride=self.model.stride)\n return [letterbox(image=x) for x in im]\n\n def write_results(self, idx, results, batch):\n \"\"\"Write inference results to a file or directory.\"\"\"\n p, im, _ = batch\n log_string = ''\n if len(im.shape) == 3:\n im = im[None] # expand for batch dim\n if self.source_type.webcam or self.source_type.from_img or self.source_type.tensor: # batch_size >= 1\n log_string += f'{idx}: '\n frame = self.dataset.count\n else:\n frame = getattr(self.dataset, 'frame', 0)\n self.data_path = p\n 
self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')\n log_string += '%gx%g ' % im.shape[2:] # print string\n result = results[idx]\n log_string += result.verbose()\n\n if self.args.save or self.args.show: # Add bbox to image\n plot_args = {\n 'line_width': self.args.line_width,\n 'boxes': self.args.boxes,\n 'conf': self.args.show_conf,\n 'labels': self.args.show_labels}\n if not self.args.retina_masks:\n plot_args['im_gpu'] = im[idx]\n self.plotted_img = result.plot(**plot_args)\n # Write\n if self.args.save_txt:\n result.save_txt(f'{self.txt_path}.txt', save_conf=self.args.save_conf)\n if self.args.save_crop:\n result.save_crop(save_dir=self.save_dir / 'crops',\n file_name=self.data_path.stem + ('' if self.dataset.mode == 'image' else f'_{frame}'))\n\n return log_string\n\n def postprocess(self, preds, img, orig_imgs):\n \"\"\"Post-processes predictions for an image and returns them.\"\"\"\n return preds\n\n def __call__(self, source=None, model=None, stream=False, *args, **kwargs):\n \"\"\"Performs inference on an image or stream.\"\"\"\n self.stream = stream\n if stream:\n return self.stream_inference(source, model, *args, **kwargs)\n else:\n return list(self.stream_inference(source, model, *args, **kwargs)) # merge list of Result into one\n\n def predict_cli(self, source=None, model=None):\n \"\"\"\n Method used for CLI prediction.\n\n It uses always generator as outputs as not required by CLI mode.\n \"\"\"\n gen = self.stream_inference(source, model)\n for _ in gen: # running CLI inference without accumulating any outputs (do not modify)\n pass\n\n def setup_source(self, source):\n \"\"\"Sets up source and inference mode.\"\"\"\n self.imgsz = check_imgsz(self.args.imgsz, stride=self.model.stride, min_dim=2) # check image size\n self.transforms = getattr(self.model.model, 'transforms', classify_transforms(\n self.imgsz[0])) if self.args.task == 'classify' else None\n self.dataset = load_inference_source(source=source,\n imgsz=self.imgsz,\n vid_stride=self.args.vid_stride,\n buffer=self.args.stream_buffer)\n self.source_type = self.dataset.source_type\n if not getattr(self, 'stream', True) and (self.dataset.mode == 'stream' or # streams\n len(self.dataset) > 1000 or # images\n any(getattr(self.dataset, 'video_flag', [False]))): # videos\n LOGGER.warning(STREAM_WARNING)\n self.vid_path, self.vid_writer = [None] * self.dataset.bs, [None] * self.dataset.bs\n\n @smart_inference_mode()\n def stream_inference(self, source=None, model=None, *args, **kwargs):\n \"\"\"Streams real-time inference on camera feed and saves results to file.\"\"\"\n if self.args.verbose:\n LOGGER.info('')\n\n # Setup model\n if not self.model:\n self.setup_model(model)\n\n with self._lock: # for thread-safe inference\n # Setup source every time predict is called\n self.setup_source(source if source is not None else self.args.source)\n\n # Check if save_dir/ label file exists\n if self.args.save or self.args.save_txt:\n (self.save_dir / 'labels' if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)\n\n # Warmup model\n if not self.done_warmup:\n self.model.warmup(imgsz=(1 if self.model.pt or self.model.triton else self.dataset.bs, 3, *self.imgsz))\n self.done_warmup = True\n\n self.seen, self.windows, self.batch, profilers = 0, [], None, (ops.Profile(), ops.Profile(), ops.Profile())\n self.run_callbacks('on_predict_start')\n\n for batch in self.dataset:\n self.run_callbacks('on_predict_batch_start')\n self.batch = batch\n path, im0s, 
vid_cap, s = batch\n\n # Preprocess\n with profilers[0]:\n im = self.preprocess(im0s)\n\n # Inference\n with profilers[1]:\n preds = self.inference(im, *args, **kwargs)\n\n # Postprocess\n with profilers[2]:\n self.results = self.postprocess(preds, im, im0s)\n\n self.run_callbacks('on_predict_postprocess_end')\n # Visualize, save, write results\n n = len(im0s)\n for i in range(n):\n self.seen += 1\n self.results[i].speed = {\n 'preprocess': profilers[0].dt * 1E3 / n,\n 'inference': profilers[1].dt * 1E3 / n,\n 'postprocess': profilers[2].dt * 1E3 / n}\n p, im0 = path[i], None if self.source_type.tensor else im0s[i].copy()\n p = Path(p)\n\n if self.args.verbose or self.args.save or self.args.save_txt or self.args.show:\n s += self.write_results(i, self.results, (p, im, im0))\n if self.args.save or self.args.save_txt:\n self.results[i].save_dir = self.save_dir.__str__()\n if self.args.show and self.plotted_img is not None:\n self.show(p)\n if self.args.save and self.plotted_img is not None:\n self.save_preds(vid_cap, i, str(self.save_dir / p.name))\n\n self.run_callbacks('on_predict_batch_end')\n yield from self.results\n\n # Print time (inference-only)\n if self.args.verbose:\n LOGGER.info(f'{s}{profilers[1].dt * 1E3:.1f}ms')\n\n # Release assets\n if isinstance(self.vid_writer[-1], cv2.VideoWriter):\n self.vid_writer[-1].release() # release final video writer\n\n # Print results\n if self.args.verbose and self.seen:\n t = tuple(x.t / self.seen * 1E3 for x in profilers) # speeds per image\n LOGGER.info(f'Speed: %.1fms preprocess, %.1fms inference, %.1fms postprocess per image at shape '\n f'{(1, 3, *im.shape[2:])}' % t)\n if self.args.save or self.args.save_txt or self.args.save_crop:\n nl = len(list(self.save_dir.glob('labels/*.txt'))) # number of labels\n s = f\"\\n{nl} label{'s' * (nl > 1)} saved to {self.save_dir / 'labels'}\" if self.args.save_txt else ''\n LOGGER.info(f\"Results saved to {colorstr('bold', self.save_dir)}{s}\")\n\n self.run_callbacks('on_predict_end')\n\n def setup_model(self, model, verbose=True):\n \"\"\"Initialize YOLO model with given parameters and set it to evaluation mode.\"\"\"\n self.model = AutoBackend(model or self.args.model,\n device=select_device(self.args.device, verbose=verbose),\n dnn=self.args.dnn,\n data=self.args.data,\n fp16=self.args.half,\n fuse=True,\n verbose=verbose)\n\n self.device = self.model.device # update device\n self.args.half = self.model.fp16 # update half\n self.model.eval()\n\n def show(self, p):\n \"\"\"Display an image in a window using OpenCV imshow().\"\"\"\n im0 = self.plotted_img\n if platform.system() == 'Linux' and p not in self.windows:\n self.windows.append(p)\n cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)\n cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])\n cv2.imshow(str(p), im0)\n cv2.waitKey(500 if self.batch[3].startswith('image') else 1) # 1 millisecond\n\n def save_preds(self, vid_cap, idx, save_path):\n \"\"\"Save video predictions as mp4 at specified path.\"\"\"\n im0 = self.plotted_img\n # Save imgs\n if self.dataset.mode == 'image':\n cv2.imwrite(save_path, im0)\n else: # 'video' or 'stream'\n if self.vid_path[idx] != save_path: # new video\n self.vid_path[idx] = save_path\n if isinstance(self.vid_writer[idx], cv2.VideoWriter):\n self.vid_writer[idx].release() # release previous video writer\n if vid_cap: # video\n fps = int(vid_cap.get(cv2.CAP_PROP_FPS)) # integer required, floats produce error in MP4 codec\n w = 
int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n else: # stream\n fps, w, h = 30, im0.shape[1], im0.shape[0]\n suffix, fourcc = ('.mp4', 'avc1') if MACOS else ('.avi', 'WMV2') if WINDOWS else ('.avi', 'MJPG')\n save_path = str(Path(save_path).with_suffix(suffix))\n self.vid_writer[idx] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))\n self.vid_writer[idx].write(im0)\n\n def run_callbacks(self, event: str):\n \"\"\"Runs all registered callbacks for a specific event.\"\"\"\n for callback in self.callbacks.get(event, []):\n callback(self)\n\n def add_callback(self, event: str, func):\n \"\"\"Add callback.\"\"\"\n self.callbacks[event].append(func)"
},
{
"identifier": "Results",
"path": "ultralytics/engine/results.py",
"snippet": "class Results(SimpleClass):\n \"\"\"\n A class for storing and manipulating inference results.\n\n Args:\n orig_img (numpy.ndarray): The original image as a numpy array.\n path (str): The path to the image file.\n names (dict): A dictionary of class names.\n boxes (torch.tensor, optional): A 2D tensor of bounding box coordinates for each detection.\n masks (torch.tensor, optional): A 3D tensor of detection masks, where each mask is a binary image.\n probs (torch.tensor, optional): A 1D tensor of probabilities of each class for classification task.\n keypoints (List[List[float]], optional): A list of detected keypoints for each object.\n\n Attributes:\n orig_img (numpy.ndarray): The original image as a numpy array.\n orig_shape (tuple): The original image shape in (height, width) format.\n boxes (Boxes, optional): A Boxes object containing the detection bounding boxes.\n masks (Masks, optional): A Masks object containing the detection masks.\n probs (Probs, optional): A Probs object containing probabilities of each class for classification task.\n keypoints (Keypoints, optional): A Keypoints object containing detected keypoints for each object.\n speed (dict): A dictionary of preprocess, inference, and postprocess speeds in milliseconds per image.\n names (dict): A dictionary of class names.\n path (str): The path to the image file.\n _keys (tuple): A tuple of attribute names for non-empty attributes.\n \"\"\"\n\n def __init__(self, orig_img, path, names, boxes=None, masks=None, probs=None, keypoints=None) -> None:\n \"\"\"Initialize the Results class.\"\"\"\n self.orig_img = orig_img\n self.orig_shape = orig_img.shape[:2]\n self.boxes = Boxes(boxes, self.orig_shape) if boxes is not None else None # native size boxes\n self.masks = Masks(masks, self.orig_shape) if masks is not None else None # native size or imgsz masks\n self.probs = Probs(probs) if probs is not None else None\n self.keypoints = Keypoints(keypoints, self.orig_shape) if keypoints is not None else None\n self.speed = {'preprocess': None, 'inference': None, 'postprocess': None} # milliseconds per image\n self.names = names\n self.path = path\n self.save_dir = None\n self._keys = 'boxes', 'masks', 'probs', 'keypoints'\n\n def __getitem__(self, idx):\n \"\"\"Return a Results object for the specified index.\"\"\"\n return self._apply('__getitem__', idx)\n\n def __len__(self):\n \"\"\"Return the number of detections in the Results object.\"\"\"\n for k in self._keys:\n v = getattr(self, k)\n if v is not None:\n return len(v)\n\n def update(self, boxes=None, masks=None, probs=None):\n \"\"\"Update the boxes, masks, and probs attributes of the Results object.\"\"\"\n if boxes is not None:\n ops.clip_boxes(boxes, self.orig_shape) # clip boxes\n self.boxes = Boxes(boxes, self.orig_shape)\n if masks is not None:\n self.masks = Masks(masks, self.orig_shape)\n if probs is not None:\n self.probs = probs\n\n def _apply(self, fn, *args, **kwargs):\n \"\"\"\n Applies a function to all non-empty attributes and returns a new Results object with modified attributes. 
This\n function is internally called by methods like .to(), .cuda(), .cpu(), etc.\n\n Args:\n fn (str): The name of the function to apply.\n *args: Variable length argument list to pass to the function.\n **kwargs: Arbitrary keyword arguments to pass to the function.\n\n Returns:\n Results: A new Results object with attributes modified by the applied function.\n \"\"\"\n r = self.new()\n for k in self._keys:\n v = getattr(self, k)\n if v is not None:\n setattr(r, k, getattr(v, fn)(*args, **kwargs))\n return r\n\n def cpu(self):\n \"\"\"Return a copy of the Results object with all tensors on CPU memory.\"\"\"\n return self._apply('cpu')\n\n def numpy(self):\n \"\"\"Return a copy of the Results object with all tensors as numpy arrays.\"\"\"\n return self._apply('numpy')\n\n def cuda(self):\n \"\"\"Return a copy of the Results object with all tensors on GPU memory.\"\"\"\n return self._apply('cuda')\n\n def to(self, *args, **kwargs):\n \"\"\"Return a copy of the Results object with tensors on the specified device and dtype.\"\"\"\n return self._apply('to', *args, **kwargs)\n\n def new(self):\n \"\"\"Return a new Results object with the same image, path, and names.\"\"\"\n return Results(orig_img=self.orig_img, path=self.path, names=self.names)\n\n def plot(\n self,\n conf=True,\n line_width=None,\n font_size=None,\n font='Arial.ttf',\n pil=False,\n img=None,\n im_gpu=None,\n kpt_radius=5,\n kpt_line=True,\n labels=True,\n boxes=True,\n masks=True,\n probs=True,\n ):\n \"\"\"\n Plots the detection results on an input RGB image. Accepts a numpy array (cv2) or a PIL Image.\n\n Args:\n conf (bool): Whether to plot the detection confidence score.\n line_width (float, optional): The line width of the bounding boxes. If None, it is scaled to the image size.\n font_size (float, optional): The font size of the text. If None, it is scaled to the image size.\n font (str): The font to use for the text.\n pil (bool): Whether to return the image as a PIL Image.\n img (numpy.ndarray): Plot to another image. if not, plot to original image.\n im_gpu (torch.Tensor): Normalized image in gpu with shape (1, 3, 640, 640), for faster mask plotting.\n kpt_radius (int, optional): Radius of the drawn keypoints. 
Default is 5.\n kpt_line (bool): Whether to draw lines connecting keypoints.\n labels (bool): Whether to plot the label of bounding boxes.\n boxes (bool): Whether to plot the bounding boxes.\n masks (bool): Whether to plot the masks.\n probs (bool): Whether to plot classification probability\n\n Returns:\n (numpy.ndarray): A numpy array of the annotated image.\n\n Example:\n ```python\n from PIL import Image\n from ultralytics import YOLO\n\n model = YOLO('yolov8n.pt')\n results = model('bus.jpg') # results list\n for r in results:\n im_array = r.plot() # plot a BGR numpy array of predictions\n im = Image.fromarray(im_array[..., ::-1]) # RGB PIL image\n im.show() # show image\n im.save('results.jpg') # save image\n ```\n \"\"\"\n if img is None and isinstance(self.orig_img, torch.Tensor):\n img = (self.orig_img[0].detach().permute(1, 2, 0).contiguous() * 255).to(torch.uint8).cpu().numpy()\n\n names = self.names\n pred_boxes, show_boxes = self.boxes, boxes\n pred_masks, show_masks = self.masks, masks\n pred_probs, show_probs = self.probs, probs\n annotator = Annotator(\n deepcopy(self.orig_img if img is None else img),\n line_width,\n font_size,\n font,\n pil or (pred_probs is not None and show_probs), # Classify tasks default to pil=True\n example=names)\n\n # Plot Segment results\n if pred_masks and show_masks:\n if im_gpu is None:\n img = LetterBox(pred_masks.shape[1:])(image=annotator.result())\n im_gpu = torch.as_tensor(img, dtype=torch.float16, device=pred_masks.data.device).permute(\n 2, 0, 1).flip(0).contiguous() / 255\n idx = pred_boxes.cls if pred_boxes else range(len(pred_masks))\n annotator.masks(pred_masks.data, colors=[colors(x, True) for x in idx], im_gpu=im_gpu)\n\n # Plot Detect results\n if pred_boxes and show_boxes:\n for d in reversed(pred_boxes):\n c, conf, id = int(d.cls), float(d.conf) if conf else None, None if d.id is None else int(d.id.item())\n name = ('' if id is None else f'id:{id} ') + names[c]\n label = (f'{name} {conf:.2f}' if conf else name) if labels else None\n annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))\n\n # Plot Classify results\n if pred_probs is not None and show_probs:\n text = ',\\n'.join(f'{names[j] if names else j} {pred_probs.data[j]:.2f}' for j in pred_probs.top5)\n x = round(self.orig_shape[0] * 0.03)\n annotator.text([x, x], text, txt_color=(255, 255, 255)) # TODO: allow setting colors\n\n # Plot Pose results\n if self.keypoints is not None:\n for k in reversed(self.keypoints.data):\n annotator.kpts(k, self.orig_shape, radius=kpt_radius, kpt_line=kpt_line)\n\n return annotator.result()\n\n def verbose(self):\n \"\"\"Return log string for each task.\"\"\"\n log_string = ''\n probs = self.probs\n boxes = self.boxes\n if len(self) == 0:\n return log_string if probs is not None else f'{log_string}(no detections), '\n if probs is not None:\n log_string += f\"{', '.join(f'{self.names[j]} {probs.data[j]:.2f}' for j in probs.top5)}, \"\n if boxes:\n for c in boxes.cls.unique():\n n = (boxes.cls == c).sum() # detections per class\n log_string += f\"{n} {self.names[int(c)]}{'s' * (n > 1)}, \"\n return log_string\n\n def save_txt(self, txt_file, save_conf=False):\n \"\"\"\n Save predictions into txt file.\n\n Args:\n txt_file (str): txt file path.\n save_conf (bool): save confidence score or not.\n \"\"\"\n boxes = self.boxes\n masks = self.masks\n probs = self.probs\n kpts = self.keypoints\n texts = []\n if probs is not None:\n # Classify\n [texts.append(f'{probs.data[j]:.2f} {self.names[j]}') for j in probs.top5]\n elif boxes:\n 
# Detect/segment/pose\n for j, d in enumerate(boxes):\n c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item())\n line = (c, *d.xywhn.view(-1))\n if masks:\n seg = masks[j].xyn[0].copy().reshape(-1) # reversed mask.xyn, (n,2) to (n*2)\n line = (c, *seg)\n if kpts is not None:\n kpt = torch.cat((kpts[j].xyn, kpts[j].conf[..., None]), 2) if kpts[j].has_visible else kpts[j].xyn\n line += (*kpt.reshape(-1).tolist(), )\n line += (conf, ) * save_conf + (() if id is None else (id, ))\n texts.append(('%g ' * len(line)).rstrip() % line)\n\n if texts:\n Path(txt_file).parent.mkdir(parents=True, exist_ok=True) # make directory\n with open(txt_file, 'a') as f:\n f.writelines(text + '\\n' for text in texts)\n\n def save_crop(self, save_dir, file_name=Path('im.jpg')):\n \"\"\"\n Save cropped predictions to `save_dir/cls/file_name.jpg`.\n\n Args:\n save_dir (str | pathlib.Path): Save path.\n file_name (str | pathlib.Path): File name.\n \"\"\"\n if self.probs is not None:\n LOGGER.warning('WARNING ⚠️ Classify task do not support `save_crop`.')\n return\n for d in self.boxes:\n save_one_box(d.xyxy,\n self.orig_img.copy(),\n file=Path(save_dir) / self.names[int(d.cls)] / f'{Path(file_name).stem}.jpg',\n BGR=True)\n\n def tojson(self, normalize=False):\n \"\"\"Convert the object to JSON format.\"\"\"\n if self.probs is not None:\n LOGGER.warning('Warning: Classify task do not support `tojson` yet.')\n return\n\n import json\n\n # Create list of detection dictionaries\n results = []\n data = self.boxes.data.cpu().tolist()\n h, w = self.orig_shape if normalize else (1, 1)\n for i, row in enumerate(data): # xyxy, track_id if tracking, conf, class_id\n box = {'x1': row[0] / w, 'y1': row[1] / h, 'x2': row[2] / w, 'y2': row[3] / h}\n conf = row[-2]\n class_id = int(row[-1])\n name = self.names[class_id]\n result = {'name': name, 'class': class_id, 'confidence': conf, 'box': box}\n if self.boxes.is_track:\n result['track_id'] = int(row[-3]) # track ID\n if self.masks:\n x, y = self.masks.xy[i][:, 0], self.masks.xy[i][:, 1] # numpy array\n result['segments'] = {'x': (x / w).tolist(), 'y': (y / h).tolist()}\n if self.keypoints is not None:\n x, y, visible = self.keypoints[i].data[0].cpu().unbind(dim=1) # torch Tensor\n result['keypoints'] = {'x': (x / w).tolist(), 'y': (y / h).tolist(), 'visible': visible.tolist()}\n results.append(result)\n\n # Convert detections to JSON\n return json.dumps(results, indent=2)"
},
{
"identifier": "ops",
"path": "ultralytics/utils/ops.py",
"snippet": "class Profile(contextlib.ContextDecorator):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback): # noqa\n def __str__(self):\n def time(self):\ndef segment2box(segment, width=640, height=640):\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True):\ndef make_divisible(x, divisor):\ndef non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nc=0, # number of classes (optional)\n max_time_img=0.05,\n max_nms=30000,\n max_wh=7680,\n):\ndef clip_boxes(boxes, shape):\ndef clip_coords(coords, shape):\ndef scale_image(masks, im0_shape, ratio_pad=None):\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xywh2ltwh(x):\ndef xyxy2ltwh(x):\ndef ltwh2xywh(x):\ndef xyxyxyxy2xywhr(corners):\ndef xywhr2xyxyxyxy(center):\ndef ltwh2xyxy(x):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef crop_mask(masks, boxes):\ndef process_mask_upsample(protos, masks_in, bboxes, shape):\ndef process_mask(protos, masks_in, bboxes, shape, upsample=False):\ndef process_mask_native(protos, masks_in, bboxes, shape):\ndef scale_masks(masks, shape, padding=True):\ndef scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize=False, padding=True):\ndef masks2segments(masks, strategy='largest'):\ndef convert_torch2numpy_batch(batch: torch.Tensor) -> np.ndarray:\ndef clean_str(s):"
}
] | from ultralytics.engine.predictor import BasePredictor
from ultralytics.engine.results import Results
from ultralytics.utils import ops | 7,711 | # Ultralytics YOLO 🚀, AGPL-3.0 license
class DetectionPredictor(BasePredictor):
"""
A class extending the BasePredictor class for prediction based on a detection model.
Example:
```python
from ultralytics.utils import ASSETS
from ultralytics.models.yolo.detect import DetectionPredictor
args = dict(model='yolov8n.pt', source=ASSETS)
predictor = DetectionPredictor(overrides=args)
predictor.predict_cli()
```
"""
def postprocess(self, preds, img, orig_imgs):
"""Post-processes predictions and returns a list of Results objects."""
| # Ultralytics YOLO 🚀, AGPL-3.0 license
class DetectionPredictor(BasePredictor):
"""
A class extending the BasePredictor class for prediction based on a detection model.
Example:
```python
from ultralytics.utils import ASSETS
from ultralytics.models.yolo.detect import DetectionPredictor
args = dict(model='yolov8n.pt', source=ASSETS)
predictor = DetectionPredictor(overrides=args)
predictor.predict_cli()
```
"""
def postprocess(self, preds, img, orig_imgs):
"""Post-processes predictions and returns a list of Results objects.""" | preds = ops.non_max_suppression(preds, | 2 | 2023-11-16 12:49:59+00:00 | 12k |
i-super/Saleor | saleor/graphql/discount/utils.py | [
{
"identifier": "Promotion",
"path": "saleor/discount/models.py",
"snippet": "class Promotion(ModelWithMetadata):\n id = models.UUIDField(primary_key=True, editable=False, unique=True, default=uuid4)\n name = models.CharField(max_length=255)\n description = SanitizedJSONField(blank=True, null=True, sanitizer=clean_editor_js)\n old_sale_id = models.IntegerField(blank=True, null=True, unique=True)\n start_date = models.DateTimeField(default=timezone.now)\n end_date = models.DateTimeField(null=True, blank=True)\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n last_notification_scheduled_at = models.DateTimeField(null=True, blank=True)\n objects = PromotionManager()\n\n class Meta:\n ordering = (\"name\", \"pk\")\n\n def is_active(self, date=None):\n if date is None:\n date = datetime.now(pytz.utc)\n return (not self.end_date or self.end_date >= date) and self.start_date <= date\n\n def assign_old_sale_id(self):\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT nextval('discount_promotion_old_sale_id_seq')\")\n result = cursor.fetchone()\n self.old_sale_id = result[0]\n self.save(update_fields=[\"old_sale_id\"])"
},
{
"identifier": "PromotionRule",
"path": "saleor/discount/models.py",
"snippet": "class PromotionRule(models.Model):\n id = models.UUIDField(primary_key=True, editable=False, unique=True, default=uuid4)\n name = models.CharField(max_length=255, blank=True, null=True)\n description = SanitizedJSONField(blank=True, null=True, sanitizer=clean_editor_js)\n promotion = models.ForeignKey(\n Promotion, on_delete=models.CASCADE, related_name=\"rules\"\n )\n channels = models.ManyToManyField(Channel)\n catalogue_predicate = models.JSONField(\n blank=True, default=dict, encoder=CustomJsonEncoder\n )\n reward_value_type = models.CharField(\n max_length=255, choices=RewardValueType.CHOICES, blank=True, null=True\n )\n reward_value = models.DecimalField(\n max_digits=settings.DEFAULT_MAX_DIGITS,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES,\n null=True,\n blank=True,\n )\n old_channel_listing_id = models.IntegerField(blank=True, null=True, unique=True)\n\n class Meta:\n ordering = (\"name\", \"pk\")\n\n def get_discount(self, currency):\n if self.reward_value_type == RewardValueType.FIXED:\n discount_amount = Money(self.reward_value, currency)\n return partial(fixed_discount, discount=discount_amount)\n if self.reward_value_type == RewardValueType.PERCENTAGE:\n return partial(\n percentage_discount,\n percentage=self.reward_value,\n rounding=ROUND_HALF_UP,\n )\n raise NotImplementedError(\"Unknown discount type\")\n\n @staticmethod\n def get_old_channel_listing_ids(qunatity):\n with connection.cursor() as cursor:\n cursor.execute(\n f\"\"\"\n SELECT nextval('discount_promotionrule_old_channel_listing_id_seq')\n FROM generate_series(1, {qunatity})\n \"\"\"\n )\n return cursor.fetchall()"
},
{
"identifier": "ProductsQueryset",
"path": "saleor/product/managers.py",
"snippet": "class ProductsQueryset(models.QuerySet):\n def published(self, channel_slug: str):\n from .models import ProductChannelListing\n\n today = datetime.datetime.now(pytz.UTC)\n channels = Channel.objects.filter(\n slug=str(channel_slug), is_active=True\n ).values(\"id\")\n channel_listings = ProductChannelListing.objects.filter(\n Q(published_at__lte=today) | Q(published_at__isnull=True),\n Exists(channels.filter(pk=OuterRef(\"channel_id\"))),\n is_published=True,\n ).values(\"id\")\n return self.filter(Exists(channel_listings.filter(product_id=OuterRef(\"pk\"))))\n\n def not_published(self, channel_slug: str):\n today = datetime.datetime.now(pytz.UTC)\n return self.annotate_publication_info(channel_slug).filter(\n Q(published_at__gt=today) & Q(is_published=True)\n | Q(is_published=False)\n | Q(is_published__isnull=True)\n )\n\n def published_with_variants(self, channel_slug: str):\n from .models import ProductVariant, ProductVariantChannelListing\n\n published = self.published(channel_slug)\n channels = Channel.objects.filter(\n slug=str(channel_slug), is_active=True\n ).values(\"id\")\n variant_channel_listings = ProductVariantChannelListing.objects.filter(\n Exists(channels.filter(pk=OuterRef(\"channel_id\"))),\n price_amount__isnull=False,\n ).values(\"id\")\n variants = ProductVariant.objects.filter(\n Exists(variant_channel_listings.filter(variant_id=OuterRef(\"pk\")))\n )\n return published.filter(Exists(variants.filter(product_id=OuterRef(\"pk\"))))\n\n def visible_to_user(self, requestor: Union[\"User\", \"App\", None], channel_slug: str):\n from .models import ALL_PRODUCTS_PERMISSIONS, ProductChannelListing\n\n if has_one_of_permissions(requestor, ALL_PRODUCTS_PERMISSIONS):\n if channel_slug:\n channels = Channel.objects.filter(slug=str(channel_slug)).values(\"id\")\n channel_listings = ProductChannelListing.objects.filter(\n Exists(channels.filter(pk=OuterRef(\"channel_id\")))\n ).values(\"id\")\n return self.filter(\n Exists(channel_listings.filter(product_id=OuterRef(\"pk\")))\n )\n return self.all()\n return self.published_with_variants(channel_slug)\n\n def annotate_publication_info(self, channel_slug: str):\n return self.annotate_is_published(channel_slug).annotate_published_at(\n channel_slug\n )\n\n def annotate_is_published(self, channel_slug: str):\n from .models import ProductChannelListing\n\n query = Subquery(\n ProductChannelListing.objects.filter(\n product_id=OuterRef(\"pk\"), channel__slug=str(channel_slug)\n ).values_list(\"is_published\")[:1]\n )\n return self.annotate(\n is_published=ExpressionWrapper(query, output_field=BooleanField())\n )\n\n def annotate_published_at(self, channel_slug: str):\n from .models import ProductChannelListing\n\n query = Subquery(\n ProductChannelListing.objects.filter(\n product_id=OuterRef(\"pk\"), channel__slug=str(channel_slug)\n ).values_list(\"published_at\")[:1]\n )\n return self.annotate(\n published_at=ExpressionWrapper(query, output_field=DateTimeField())\n )\n\n def annotate_visible_in_listings(self, channel_slug):\n from .models import ProductChannelListing\n\n query = Subquery(\n ProductChannelListing.objects.filter(\n product_id=OuterRef(\"pk\"), channel__slug=str(channel_slug)\n ).values_list(\"visible_in_listings\")[:1]\n )\n return self.annotate(\n visible_in_listings=ExpressionWrapper(query, output_field=BooleanField())\n )\n\n def sort_by_attribute(\n self, attribute_pk: Union[int, str], descending: bool = False\n ):\n \"\"\"Sort a query set by the values of the given product attribute.\n\n :param 
attribute_pk: The database ID (must be a numeric) of the attribute\n to sort by.\n :param descending: The sorting direction.\n \"\"\"\n from ..attribute.models import (\n AssignedProductAttributeValue,\n AttributeProduct,\n AttributeValue,\n )\n\n qs: models.QuerySet = self\n # If the passed attribute ID is valid, execute the sorting\n if not (isinstance(attribute_pk, int) or attribute_pk.isnumeric()):\n return qs.annotate(\n concatenated_values_order=Value(\n None, output_field=models.IntegerField()\n ),\n concatenated_values=Value(None, output_field=models.CharField()),\n )\n\n qs = qs.annotate(\n # Implicit `GROUP BY` required for the `StringAgg` aggregation\n grouped_ids=Count(\"id\"),\n # String aggregation of the attribute's values to efficiently sort them\n concatenated_values=Case(\n # If the product has no association data but has\n # the given attribute associated to its product type,\n # then consider the concatenated values as empty (non-null).\n When(\n Exists(\n AttributeProduct.objects.filter(\n product_type_id=OuterRef(\"product_type_id\"),\n attribute_id=attribute_pk,\n )\n )\n & ~Exists(\n AssignedProductAttributeValue.objects.filter(\n product_id=OuterRef(\"id\"), value__attribute_id=attribute_pk\n )\n ),\n then=Value(\"\"),\n ),\n default=StringAgg(\n F(\"attributevalues__value__name\"),\n filter=Q(attributevalues__value__attribute_id=attribute_pk),\n delimiter=\",\",\n ordering=(\n [\n f\"attributevalues__value__{field_name}\"\n for field_name in AttributeValue._meta.ordering or []\n ]\n ),\n ),\n output_field=models.CharField(),\n ),\n concatenated_values_order=Case(\n # Make the products having no such attribute be last in the sorting\n When(concatenated_values=None, then=2),\n # Put the products having an empty attribute value at the bottom of\n # the other products.\n When(concatenated_values=\"\", then=1),\n # Put the products having an attribute value to be always at the top\n default=0,\n output_field=models.IntegerField(),\n ),\n )\n\n # Sort by concatenated_values_order then\n # Sort each group of products (0, 1, 2, ...) per attribute values\n # Sort each group of products by name,\n # if they have the same values or not values\n ordering = \"-\" if descending else \"\"\n return qs.order_by(\n f\"{ordering}concatenated_values_order\",\n f\"{ordering}concatenated_values\",\n f\"{ordering}name\",\n )\n\n def prefetched_for_webhook(self, single_object=True):\n common_fields = (\n \"media\",\n \"variants__attributes__values\",\n \"variants__attributes__assignment__attribute\",\n \"variants__variant_media__media\",\n \"variants__stocks__allocations\",\n \"variants__channel_listings__channel\",\n \"channel_listings__channel\",\n \"product_type__product_attributes__values\",\n \"product_type__attributeproduct\",\n )\n if single_object:\n return self.prefetch_related(*common_fields)\n return self.prefetch_related(\"collections\", \"category\", *common_fields)"
},
{
"identifier": "ProductVariantQueryset",
"path": "saleor/product/managers.py",
"snippet": "class ProductVariantQueryset(models.QuerySet):\n def annotate_quantities(self):\n \"\"\"Annotate the queryset with quantity-related fields.\n\n This method annotates the queryset with the following fields:\n - `quantity`: The total quantity in stock for each product variant.\n - `quantity_allocated`: The total quantity allocated from the stock\n for each product variant.\n - `available_quantity`: The available quantity for each product variant,\n which is calculated as `quantity - quantity_allocated`.\n \"\"\"\n\n from saleor.warehouse.models import Allocation\n\n allocations_subquery = (\n Allocation.objects.filter(stock__product_variant=OuterRef(\"pk\"))\n .values(\"stock__product_variant\")\n .annotate(total_allocated=Coalesce(Sum(\"quantity_allocated\"), 0))\n .values(\"total_allocated\")\n )\n\n return self.annotate(\n quantity=Coalesce(Sum(\"stocks__quantity\"), Value(0)),\n quantity_allocated=Coalesce(\n Subquery(allocations_subquery, output_field=models.IntegerField()),\n Value(0),\n ),\n available_quantity=Case(\n When(quantity_allocated=None, then=F(\"quantity\")),\n default=F(\"quantity\")\n - Coalesce(\n Subquery(allocations_subquery, output_field=models.IntegerField()),\n Value(0),\n ),\n output_field=models.IntegerField(),\n ),\n )\n\n def available_in_channel(self, channel_slug):\n return self.filter(\n channel_listings__price_amount__isnull=False,\n channel_listings__channel__slug=str(channel_slug),\n )\n\n def prefetched_for_webhook(self):\n return self.prefetch_related(\n \"attributes__values\",\n \"attributes__assignment__attribute\",\n \"variant_media__media\",\n )"
},
{
"identifier": "Category",
"path": "saleor/product/models.py",
"snippet": "class Category(ModelWithMetadata, MPTTModel, SeoModel):\n name = models.CharField(max_length=250)\n slug = models.SlugField(max_length=255, unique=True, allow_unicode=True)\n description = SanitizedJSONField(blank=True, null=True, sanitizer=clean_editor_js)\n description_plaintext = TextField(blank=True)\n updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)\n parent = models.ForeignKey(\n \"self\", null=True, blank=True, related_name=\"children\", on_delete=models.CASCADE\n )\n background_image = models.ImageField(\n upload_to=\"category-backgrounds\", blank=True, null=True\n )\n background_image_alt = models.CharField(max_length=128, blank=True)\n\n objects = models.Manager()\n tree = TreeManager() # type: ignore[django-manager-missing]\n\n class Meta:\n indexes = [\n *ModelWithMetadata.Meta.indexes,\n GinIndex(\n name=\"category_search_name_slug_gin\",\n # `opclasses` and `fields` should be the same length\n fields=[\"name\", \"slug\", \"description_plaintext\"],\n opclasses=[\"gin_trgm_ops\"] * 3,\n ),\n BTreeIndex(fields=[\"updated_at\"], name=\"updated_at_idx\"),\n ]\n\n def __str__(self) -> str:\n return self.name"
},
{
"identifier": "Collection",
"path": "saleor/product/models.py",
"snippet": "class Collection(SeoModel, ModelWithMetadata):\n name = models.CharField(max_length=250)\n slug = models.SlugField(max_length=255, unique=True, allow_unicode=True)\n products = models.ManyToManyField(\n Product,\n blank=True,\n related_name=\"collections\",\n through=CollectionProduct,\n through_fields=(\"collection\", \"product\"),\n )\n background_image = models.ImageField(\n upload_to=\"collection-backgrounds\", blank=True, null=True\n )\n background_image_alt = models.CharField(max_length=128, blank=True)\n\n description = SanitizedJSONField(blank=True, null=True, sanitizer=clean_editor_js)\n\n objects = managers.CollectionManager()\n\n class Meta(ModelWithMetadata.Meta):\n ordering = (\"slug\",)\n indexes = [\n *ModelWithMetadata.Meta.indexes,\n GinIndex(\n name=\"collection_search_gin\",\n # `opclasses` and `fields` should be the same length\n fields=[\"name\", \"slug\"],\n opclasses=[\"gin_trgm_ops\"] * 2,\n ),\n ]\n\n def __str__(self) -> str:\n return self.name"
},
{
"identifier": "CollectionProduct",
"path": "saleor/product/models.py",
"snippet": "class CollectionProduct(SortableModel):\n collection = models.ForeignKey(\n \"Collection\", related_name=\"collectionproduct\", on_delete=models.CASCADE\n )\n product = models.ForeignKey(\n Product, related_name=\"collectionproduct\", on_delete=models.CASCADE\n )\n\n class Meta:\n unique_together = ((\"collection\", \"product\"),)\n\n def get_ordering_queryset(self):\n return self.product.collectionproduct.all()"
},
{
"identifier": "Product",
"path": "saleor/product/models.py",
"snippet": "class Product(SeoModel, ModelWithMetadata, ModelWithExternalReference):\n product_type = models.ForeignKey(\n ProductType, related_name=\"products\", on_delete=models.CASCADE\n )\n name = models.CharField(max_length=250)\n slug = models.SlugField(max_length=255, unique=True, allow_unicode=True)\n description = SanitizedJSONField(blank=True, null=True, sanitizer=clean_editor_js)\n description_plaintext = TextField(blank=True)\n search_document = models.TextField(blank=True, default=\"\")\n search_vector = SearchVectorField(blank=True, null=True)\n search_index_dirty = models.BooleanField(default=False, db_index=True)\n\n category = models.ForeignKey(\n Category,\n related_name=\"products\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n weight = MeasurementField(\n measurement=Weight,\n unit_choices=WeightUnits.CHOICES,\n blank=True,\n null=True,\n )\n default_variant = models.OneToOneField(\n \"ProductVariant\",\n blank=True,\n null=True,\n on_delete=models.SET_NULL,\n related_name=\"+\",\n )\n rating = models.FloatField(null=True, blank=True)\n tax_class = models.ForeignKey(\n TaxClass,\n related_name=\"products\",\n blank=True,\n null=True,\n on_delete=models.SET_NULL,\n )\n\n objects = managers.ProductManager()\n\n class Meta:\n app_label = \"product\"\n ordering = (\"slug\",)\n permissions = (\n (ProductPermissions.MANAGE_PRODUCTS.codename, \"Manage products.\"),\n )\n indexes = [\n GinIndex(\n name=\"product_search_gin\",\n fields=[\"search_document\"],\n opclasses=[\"gin_trgm_ops\"],\n ),\n GinIndex(\n name=\"product_tsearch\",\n fields=[\"search_vector\"],\n ),\n GinIndex(\n name=\"product_gin\",\n fields=[\"name\", \"slug\"],\n opclasses=[\"gin_trgm_ops\"] * 2,\n ),\n ]\n indexes.extend(ModelWithMetadata.Meta.indexes)\n\n def __iter__(self):\n if not hasattr(self, \"__variants\"):\n setattr(self, \"__variants\", self.variants.all())\n return iter(getattr(self, \"__variants\"))\n\n def __repr__(self) -> str:\n class_ = type(self)\n return f\"<{class_.__module__}.{class_.__name__}(pk={self.pk!r}, name={self.name!r})>\"\n\n def __str__(self) -> str:\n return self.name\n\n def get_first_image(self):\n all_media = self.media.all()\n images = [media for media in all_media if media.type == ProductMediaTypes.IMAGE]\n return images[0] if images else None\n\n @staticmethod\n def sort_by_attribute_fields() -> list:\n return [\"concatenated_values_order\", \"concatenated_values\", \"name\"]"
},
{
"identifier": "ProductVariant",
"path": "saleor/product/models.py",
"snippet": "class ProductVariant(SortableModel, ModelWithMetadata, ModelWithExternalReference):\n sku = models.CharField(max_length=255, unique=True, null=True, blank=True)\n name = models.CharField(max_length=255, blank=True)\n product = models.ForeignKey(\n Product, related_name=\"variants\", on_delete=models.CASCADE\n )\n media = models.ManyToManyField(\n \"product.ProductMedia\", through=\"product.VariantMedia\"\n )\n track_inventory = models.BooleanField(default=True)\n is_preorder = models.BooleanField(default=False)\n preorder_end_date = models.DateTimeField(null=True, blank=True)\n preorder_global_threshold = models.IntegerField(blank=True, null=True)\n quantity_limit_per_customer = models.IntegerField(\n blank=True, null=True, validators=[MinValueValidator(1)]\n )\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n\n weight = MeasurementField(\n measurement=Weight,\n unit_choices=WeightUnits.CHOICES,\n blank=True,\n null=True,\n )\n\n objects = managers.ProductVariantManager()\n\n class Meta(ModelWithMetadata.Meta):\n ordering = (\"sort_order\", \"sku\")\n app_label = \"product\"\n\n def __str__(self) -> str:\n return self.name or self.sku or f\"ID:{self.pk}\"\n\n def get_global_id(self):\n return graphene.Node.to_global_id(\"ProductVariant\", self.id)\n\n def get_base_price(\n self,\n channel_listing: \"ProductVariantChannelListing\",\n price_override: Optional[\"Decimal\"] = None,\n ) -> \"Money\":\n \"\"\"Return the base variant price before applying the promotion discounts.\"\"\"\n return (\n channel_listing.price\n if price_override is None\n else Money(price_override, channel_listing.currency)\n )\n\n def get_price(\n self,\n channel_listing: \"ProductVariantChannelListing\",\n price_override: Optional[\"Decimal\"] = None,\n promotion_rules: Optional[Iterable[\"PromotionRule\"]] = None,\n ) -> \"Money\":\n \"\"\"Return the variant discounted price with applied promotions.\n\n If a custom price is provided, return the price with applied discounts from\n valid promotion rules for this variant.\n \"\"\"\n if price_override is None:\n return channel_listing.discounted_price or channel_listing.price\n price: \"Money\" = self.get_base_price(channel_listing, price_override)\n rules = promotion_rules or []\n return calculate_discounted_price_for_rules(\n price=price, rules=rules, currency=channel_listing.currency\n )\n\n def get_weight(self):\n return self.weight or self.product.weight or self.product.product_type.weight\n\n def is_shipping_required(self) -> bool:\n return self.product.product_type.is_shipping_required\n\n def is_gift_card(self) -> bool:\n return self.product.product_type.kind == ProductTypeKind.GIFT_CARD\n\n def is_digital(self) -> bool:\n is_digital = self.product.product_type.is_digital\n return not self.is_shipping_required() and is_digital\n\n def display_product(self, translated: bool = False) -> str:\n if translated:\n product = get_translation(self.product).name\n variant_display = get_translation(self).name\n else:\n variant_display = str(self)\n product = self.product\n product_display = (\n f\"{product} ({variant_display})\" if variant_display else str(product)\n )\n return product_display\n\n def get_ordering_queryset(self):\n return self.product.variants.all()\n\n def is_preorder_active(self):\n return self.is_preorder and (\n self.preorder_end_date is None or timezone.now() <= self.preorder_end_date\n )"
},
{
"identifier": "where_filter_qs",
"path": "saleor/graphql/core/connection.py",
"snippet": "def where_filter_qs(iterable, args, filterset_class, filter_input, request):\n \"\"\"Filter queryset by complex statement provided in where argument.\n\n Handle `AND`, `OR`, `NOT` operators, as well as flat filter input.\n The returned queryset contains data that fulfill all specified statements.\n The condition can be nested, the operators cannot be mixed in\n a single filter object.\n Multiple operators can be provided with use of nesting. See the example below.\n\n E.g.\n {\n 'where': {\n 'AND': [\n {'input_type': {'one_of': ['rich-text', 'dropdown']}}\n {\n 'OR': [\n {'name': {'eq': 'Author'}},\n {'slug': {'one_of': ['a-rich', 'abv']}}\n ]\n },\n {\n 'NOT': {'name': {'eq': 'ABV'}}\n }\n ],\n }\n }\n For above example the returned instances will fulfill following conditions:\n - it must be a type o 'rich-text'or 'dropdown'\n - the name must equal to 'Author' or the slug must be equal to `a-rich` or `abv`\n - the name cannot be equal to `ABV`\n \"\"\"\n # when any operator appear there cannot be any more data in filter input\n if contains_filter_operator(filter_input) and len(filter_input) > 1:\n raise GraphQLError(\"Cannot mix operators with other filter inputs.\")\n\n and_filter_input = filter_input.pop(\"AND\", None)\n or_filter_input = filter_input.pop(\"OR\", None)\n # TODO: needs optimization\n # not_filter_input = filter_input.pop(\"NOT\", None)\n\n if isinstance(iterable, ChannelQsContext):\n queryset = iterable.qs\n else:\n queryset = iterable\n\n if and_filter_input:\n queryset = _handle_and_filter_input(\n and_filter_input, queryset, args, filterset_class, request\n )\n\n if or_filter_input:\n queryset = _handle_or_filter_input(\n or_filter_input, queryset, args, filterset_class, request\n )\n\n # TODO: needs optimization\n # if not_filter_input:\n # queryset = _handle_not_filter_input(\n # not_filter_input, queryset, args, filterset_class, request\n # )\n\n if filter_input:\n qs_to_combine = filter_qs(\n iterable, args, filterset_class, filter_input, request\n )\n if isinstance(qs_to_combine, ChannelQsContext):\n queryset &= qs_to_combine.qs\n\n else:\n queryset &= qs_to_combine\n\n if isinstance(iterable, ChannelQsContext):\n return ChannelQsContext(queryset, iterable.channel_slug)\n\n return queryset"
},
{
"identifier": "CategoryWhere",
"path": "saleor/graphql/product/filters.py",
"snippet": "class CategoryWhere(MetadataWhereFilterBase):\n ids = GlobalIDMultipleChoiceWhereFilter(method=filter_by_ids(\"Category\"))\n\n class Meta:\n model = Category\n fields = []"
},
{
"identifier": "CollectionWhere",
"path": "saleor/graphql/product/filters.py",
"snippet": "class CollectionWhere(MetadataWhereFilterBase):\n ids = GlobalIDMultipleChoiceWhereFilter(method=filter_by_ids(\"Collection\"))\n\n class Meta:\n model = Collection\n fields = []"
},
{
"identifier": "ProductVariantWhere",
"path": "saleor/graphql/product/filters.py",
"snippet": "class ProductVariantWhere(MetadataWhereFilterBase):\n ids = GlobalIDMultipleChoiceWhereFilter(method=filter_by_ids(\"ProductVariant\"))\n\n class Meta:\n model = ProductVariant\n fields = []"
},
{
"identifier": "ProductWhere",
"path": "saleor/graphql/product/filters.py",
"snippet": "class ProductWhere(MetadataWhereFilterBase):\n ids = GlobalIDMultipleChoiceWhereFilter(method=filter_by_ids(\"Product\"))\n name = OperationObjectTypeWhereFilter(\n input_class=StringFilterInput,\n method=\"filter_product_name\",\n help_text=\"Filter by product name.\",\n )\n slug = OperationObjectTypeWhereFilter(\n input_class=StringFilterInput,\n method=\"filter_product_slug\",\n help_text=\"Filter by product slug.\",\n )\n product_type = OperationObjectTypeWhereFilter(\n input_class=GlobalIDFilterInput,\n method=\"filter_product_type\",\n help_text=\"Filter by product type.\",\n )\n category = OperationObjectTypeWhereFilter(\n input_class=GlobalIDFilterInput,\n method=\"filter_category\",\n help_text=\"Filter by product category.\",\n )\n collection = OperationObjectTypeWhereFilter(\n input_class=GlobalIDFilterInput,\n method=\"filter_collection\",\n help_text=\"Filter by collection.\",\n )\n is_available = BooleanWhereFilter(\n method=\"filter_is_available\", help_text=\"Filter by availability for purchase.\"\n )\n is_published = BooleanWhereFilter(\n method=\"filter_is_published\", help_text=\"Filter by public visibility.\"\n )\n is_visible_in_listing = BooleanWhereFilter(\n method=\"filter_is_listed\", help_text=\"Filter by visibility on the channel.\"\n )\n published_from = ObjectTypeWhereFilter(\n input_class=graphene.DateTime,\n method=\"filter_published_from\",\n help_text=\"Filter by the publication date.\",\n )\n available_from = ObjectTypeWhereFilter(\n input_class=graphene.DateTime,\n method=\"filter_available_from\",\n help_text=\"Filter by the date of availability for purchase.\",\n )\n has_category = BooleanWhereFilter(\n method=where_filter_has_category,\n help_text=\"Filter by product with category assigned.\",\n )\n price = OperationObjectTypeWhereFilter(\n input_class=DecimalFilterInput,\n method=\"filter_variant_price\",\n help_text=\"Filter by product variant price.\",\n )\n minimal_price = OperationObjectTypeWhereFilter(\n input_class=DecimalFilterInput,\n method=\"filter_minimal_price\",\n field_name=\"minimal_price_amount\",\n help_text=\"Filter by the lowest variant price after discounts.\",\n )\n attributes = ListObjectTypeWhereFilter(\n input_class=\"saleor.graphql.attribute.types.AttributeInput\",\n method=\"filter_attributes\",\n help_text=\"Filter by attributes associated with the product.\",\n )\n stock_availability = EnumWhereFilter(\n input_class=StockAvailability,\n method=\"filter_stock_availability\",\n help_text=\"Filter by variants having specific stock status.\",\n )\n stocks = ObjectTypeWhereFilter(\n input_class=ProductStockFilterInput,\n method=where_filter_stocks,\n help_text=\"Filter by stock of the product variant.\",\n )\n gift_card = BooleanWhereFilter(\n method=where_filter_gift_card,\n help_text=\"Filter on whether product is a gift card or not.\",\n )\n has_preordered_variants = BooleanWhereFilter(\n method=where_filter_has_preordered_variants,\n help_text=\"Filter by product with preordered variants.\",\n )\n updated_at = ObjectTypeWhereFilter(\n input_class=DateTimeFilterInput,\n method=where_filter_updated_at_range,\n help_text=\"Filter by when was the most recent update.\",\n )\n\n class Meta:\n model = Product\n fields = []\n\n @staticmethod\n def filter_product_name(qs, _, value):\n return filter_where_by_string_field(qs, \"name\", value)\n\n @staticmethod\n def filter_product_slug(qs, _, value):\n return filter_where_by_string_field(qs, \"slug\", value)\n\n @staticmethod\n def filter_product_type(qs, _, value):\n return 
filter_where_by_id_field(qs, \"product_type\", value, \"ProductType\")\n\n @staticmethod\n def filter_category(qs, _, value):\n return filter_where_by_id_field(qs, \"category\", value, \"Category\")\n\n @staticmethod\n def filter_collection(qs, _, value):\n collection_products_qs = CollectionProduct.objects.filter()\n collection_products_qs = filter_where_by_id_field(\n collection_products_qs, \"collection_id\", value, \"Collection\"\n )\n collection_products = collection_products_qs.values(\"product_id\")\n return qs.filter(Exists(collection_products.filter(product_id=OuterRef(\"pk\"))))\n\n def filter_is_available(self, queryset, name, value):\n channel_slug = get_channel_slug_from_filter_data(self.data)\n return where_filter_products_is_available(\n queryset,\n name,\n value,\n channel_slug,\n )\n\n def filter_is_published(self, queryset, name, value):\n channel_slug = get_channel_slug_from_filter_data(self.data)\n return _filter_products_is_published(\n queryset,\n name,\n value,\n channel_slug,\n )\n\n def filter_is_listed(self, queryset, name, value):\n channel_slug = get_channel_slug_from_filter_data(self.data)\n return _filter_products_visible_in_listing(\n queryset,\n name,\n value,\n channel_slug,\n )\n\n def filter_published_from(self, queryset, name, value):\n channel_slug = get_channel_slug_from_filter_data(self.data)\n return where_filter_products_channel_field_from_date(\n queryset,\n name,\n value,\n channel_slug,\n \"published_at\",\n )\n\n def filter_available_from(self, queryset, name, value):\n channel_slug = get_channel_slug_from_filter_data(self.data)\n return where_filter_products_channel_field_from_date(\n queryset,\n name,\n value,\n channel_slug,\n \"available_for_purchase_at\",\n )\n\n def filter_variant_price(self, qs, _, value):\n channel_slug = get_channel_slug_from_filter_data(self.data)\n channel_id = Channel.objects.filter(slug=channel_slug).values(\"pk\")\n variant_listing = ProductVariantChannelListing.objects.filter(\n Exists(channel_id.filter(pk=OuterRef(\"channel_id\")))\n )\n variant_listing = filter_where_by_numeric_field(\n variant_listing, \"price_amount\", value\n )\n variant_listing = variant_listing.values(\"variant_id\")\n variants = ProductVariant.objects.filter(\n Exists(variant_listing.filter(variant_id=OuterRef(\"pk\")))\n ).values(\"product_id\")\n return qs.filter(Exists(variants.filter(product_id=OuterRef(\"pk\"))))\n\n def filter_minimal_price(self, qs, _, value):\n channel_slug = get_channel_slug_from_filter_data(self.data)\n channel = Channel.objects.filter(slug=channel_slug).first()\n if not channel:\n return qs\n product_listing = ProductChannelListing.objects.filter(channel_id=channel.id)\n product_listing = filter_where_by_numeric_field(\n product_listing, \"discounted_price_amount\", value\n )\n product_listing = product_listing.values(\"product_id\")\n return qs.filter(Exists(product_listing.filter(product_id=OuterRef(\"pk\"))))\n\n @staticmethod\n def filter_attributes(queryset, name, value):\n return where_filter_attributes(queryset, name, value)\n\n def filter_stock_availability(self, queryset, name, value):\n channel_slug = get_channel_slug_from_filter_data(self.data)\n return where_filter_stock_availability(queryset, name, value, channel_slug)"
}
] | from collections import defaultdict
from copy import deepcopy
from enum import Enum
from typing import Optional, Union, cast
from django.db.models import Exists, OuterRef, QuerySet
from graphene.utils.str_converters import to_camel_case
from ...discount.models import Promotion, PromotionRule
from ...product.managers import ProductsQueryset, ProductVariantQueryset
from ...product.models import (
Category,
Collection,
CollectionProduct,
Product,
ProductVariant,
)
from ..core.connection import where_filter_qs
from ..product.filters import (
CategoryWhere,
CollectionWhere,
ProductVariantWhere,
ProductWhere,
)
import graphene | 8,459 |
PREDICATE_OPERATOR_DATA_T = list[dict[str, Union[list, dict, str, bool]]]
class Operators(Enum):
AND = "and"
OR = "or"
# TODO: move to validators in promotion dir
def clean_predicate(predicate: Union[dict[str, Union[dict, list]], list]):
"""Convert camel cases keys into snake case."""
if isinstance(predicate, list):
return [
clean_predicate(item) if isinstance(item, (dict, list)) else item
for item in predicate
]
return {
to_camel_case(key): clean_predicate(value)
if isinstance(value, (dict, list))
else value
for key, value in predicate.items()
}
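# Illustrative note (not part of the original module): with the converter used
# above, a predicate such as {"variant_predicate": {"ids": [...]}} comes back as
# {"variantPredicate": {"ids": [...]}}; nested dicts and lists are rewritten
# recursively, while non-container values pass through unchanged.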
def get_products_for_promotion(promotion: Promotion) -> ProductsQueryset:
"""Get products that are included in the promotion based on catalogue predicate."""
variants = get_variants_for_promotion(promotion)
return Product.objects.filter(Exists(variants.filter(product_id=OuterRef("id"))))
def get_products_for_rule(rule: PromotionRule) -> ProductsQueryset:
"""Get products that are included in the rule based on catalogue predicate."""
variants = get_variants_for_predicate(deepcopy(rule.catalogue_predicate))
return Product.objects.filter(Exists(variants.filter(product_id=OuterRef("id"))))
def get_variants_for_promotion(promotion: Promotion) -> ProductVariantQueryset:
"""Get variants that are included in the promotion based on catalogue predicate."""
queryset = ProductVariant.objects.none()
for rule in promotion.rules.iterator():
queryset |= get_variants_for_predicate(rule.catalogue_predicate)
return queryset
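# Descriptive note (not in the original file): the loop above ORs together the
# variant querysets of every rule, so a variant matching any rule's predicate is
# included; seeding with ProductVariant.objects.none() keeps the union valid
# even for promotions that have no rules.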
def _handle_product_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
product_qs = where_filter_qs(
Product.objects.all(), {}, ProductWhere, predicate_data, None
)
return ProductVariant.objects.filter(
Exists(product_qs.filter(id=OuterRef("product_id")))
)
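# Illustration with an assumed input shape (not taken from the original file):
# a product-level predicate like {"ids": ["UHJvZHVjdDox"]} is resolved by
# ProductWhere into a Product queryset, and the Exists() subquery above then
# keeps only the variants whose product_id falls inside that queryset.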
def _handle_variant_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
return where_filter_qs(
ProductVariant.objects.all(), {}, ProductVariantWhere, predicate_data, None
)
def _handle_collection_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
collection_qs = where_filter_qs(
|
PREDICATE_OPERATOR_DATA_T = list[dict[str, Union[list, dict, str, bool]]]
class Operators(Enum):
AND = "and"
OR = "or"
# TODO: move to validators in promotion dir
def clean_predicate(predicate: Union[dict[str, Union[dict, list]], list]):
"""Convert camel cases keys into snake case."""
if isinstance(predicate, list):
return [
clean_predicate(item) if isinstance(item, (dict, list)) else item
for item in predicate
]
return {
to_camel_case(key): clean_predicate(value)
if isinstance(value, (dict, list))
else value
for key, value in predicate.items()
}
def get_products_for_promotion(promotion: Promotion) -> ProductsQueryset:
"""Get products that are included in the promotion based on catalogue predicate."""
variants = get_variants_for_promotion(promotion)
return Product.objects.filter(Exists(variants.filter(product_id=OuterRef("id"))))
def get_products_for_rule(rule: PromotionRule) -> ProductsQueryset:
"""Get products that are included in the rule based on catalogue predicate."""
variants = get_variants_for_predicate(deepcopy(rule.catalogue_predicate))
return Product.objects.filter(Exists(variants.filter(product_id=OuterRef("id"))))
def get_variants_for_promotion(promotion: Promotion) -> ProductVariantQueryset:
"""Get variants that are included in the promotion based on catalogue predicate."""
queryset = ProductVariant.objects.none()
for rule in promotion.rules.iterator():
queryset |= get_variants_for_predicate(rule.catalogue_predicate)
return queryset
def _handle_product_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
product_qs = where_filter_qs(
Product.objects.all(), {}, ProductWhere, predicate_data, None
)
return ProductVariant.objects.filter(
Exists(product_qs.filter(id=OuterRef("product_id")))
)
def _handle_variant_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
return where_filter_qs(
ProductVariant.objects.all(), {}, ProductVariantWhere, predicate_data, None
)
def _handle_collection_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
collection_qs = where_filter_qs( | Collection.objects.all(), {}, CollectionWhere, predicate_data, None | 5 | 2023-11-13 05:00:35+00:00 | 12k |
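A hedged sketch of how the truncated `_handle_collection_predicate` above can be finished. The recorded next line supplies the `where_filter_qs` arguments; the Collection → CollectionProduct → ProductVariant join below is an assumption modelled on the `_handle_product_predicate` pattern and the `CollectionProduct` through-model from this row's context, not the verbatim repository code. It uses only names already imported in this row:

```python
def _handle_collection_predicate(
    predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
    collection_qs = where_filter_qs(
        Collection.objects.all(), {}, CollectionWhere, predicate_data, None
    )
    # Assumed continuation: Collection -> CollectionProduct -> ProductVariant
    collection_products = CollectionProduct.objects.filter(
        Exists(collection_qs.filter(id=OuterRef("collection_id")))
    ).values("product_id")
    return ProductVariant.objects.filter(
        Exists(collection_products.filter(product_id=OuterRef("product_id")))
    )
```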
Aues6uen11Z/Zafkiel | zafkiel/ui/ui.py | [
{
"identifier": "ImageTemplate",
"path": "zafkiel/device/template.py",
"snippet": "class ImageTemplate(Template):\n def __init__(\n self,\n filename: str,\n record_pos: tuple = None,\n keyword: Keyword = None,\n threshold: float = None,\n target_pos: int = TargetPos.MID,\n resolution: tuple = (1280, 720),\n rgb: bool = False,\n scale_max: int = 800,\n scale_step: float = 0.005,\n template_path: str = 'templates'\n ):\n\n super().__init__(filename, threshold, target_pos, record_pos, resolution, rgb, scale_max, scale_step)\n\n self.template_path = template_path # under root path\n self.keyword = keyword\n if self.keyword is not None and self.keyword.name == '':\n \"\"\"\n Please note that due to the __post_init__ method of the Keyword class running before this 'name' assignment, \n its 'instances' dictionary will get a dictionary item with an empty string key.\n This means that each instance of the Keyword class that omits the 'name' parameter will be constantly \n overwritten. If you want to use Keyword().instances for special purposes, you must initialize 'name'.\n \"\"\"\n self.keyword.name = self.name\n\n @cached_property\n def filepath(self) -> str:\n if self._filepath:\n return self._filepath\n for dir_name in G.BASEDIR:\n filepath = os.path.join(dir_name, self.template_path, self.filename)\n if os.path.isfile(filepath):\n self._filepath = filepath\n return self._filepath\n return self.filename\n\n @cached_property\n def name(self) -> str:\n return Path(self.filename).stem\n\n @cached_property\n def image(self) -> ndarray:\n return self._imread()\n\n @cached_property\n def height(self) -> int:\n return self.image.shape[0]\n\n @cached_property\n def width(self) -> int:\n return self.image.shape[1]\n\n def _has_border(self) -> bool:\n \"\"\"\n If game running in a bordered process, coordinates need to be corrected.\n\n Returns:\n Whether the game running in a bordered process.\n \"\"\"\n actual_ratio = G.DEVICE.get_current_resolution()[0] / G.DEVICE.get_current_resolution()[1]\n template_ratio = self.resolution[0] / self.resolution[1]\n return actual_ratio != template_ratio\n\n def ratio(self, screen_height: float = None) -> float:\n \"\"\"\n Calculate the ratio of the current screen to the template image.\n \"\"\"\n if screen_height is None:\n if self._has_border():\n border = Config.BORDER[0] + Config.BORDER[2]\n else:\n border = 0\n screen_height = G.DEVICE.get_current_resolution()[1] - border\n\n return screen_height / self.resolution[1]\n\n @cached_property\n def area(self) -> tuple:\n \"\"\"\n Calculate the area of the template image on the current screen.\n\n Returns:\n Upper left and lower right corner coordinate.\n \"\"\"\n screen_resolution = G.DEVICE.get_current_resolution()\n\n if self._has_border():\n border = Config.BORDER\n else:\n border = (0, 0, 0)\n\n screen_width = screen_resolution[0] - border[1] * 2\n screen_height = screen_resolution[1] - border[0] - border[2]\n\n ratio = self.ratio(screen_height)\n x1 = screen_width / 2 + self.record_pos[0] * screen_width - self.width / 2 * ratio + border[1]\n y1 = screen_height / 2 + self.record_pos[1] * screen_width - self.height / 2 * ratio + border[0]\n x2 = screen_width / 2 + self.record_pos[0] * screen_width + self.width / 2 * ratio + border[1]\n y2 = screen_height / 2 + self.record_pos[1] * screen_width + self.height / 2 * ratio + border[0]\n return x1, y1, x2, y2"
},
{
"identifier": "logger",
"path": "zafkiel/logger.py",
"snippet": ""
},
{
"identifier": "API",
"path": "zafkiel/device/api.py",
"snippet": "class API:\n \"\"\"\n Device Setup APIs\n \"\"\"\n\n @staticmethod\n def init_device(platform=\"Android\", uuid=None, **kwargs):\n return init_device(platform, uuid, **kwargs)\n\n @staticmethod\n def connect_device(uri):\n return connect_device(uri)\n\n @staticmethod\n def device():\n return device()\n\n @staticmethod\n def set_current(idx):\n set_current(idx)\n\n @staticmethod\n def auto_setup(\n basedir: str = None,\n devices: list = None,\n firing_time: int = 30,\n logdir: bool = None,\n project_root: str = None,\n compress: int = None\n ):\n \"\"\"\n Auto setup running env and try to connect device if no device is connected.\n\n Args:\n basedir: basedir of script, __file__ is also acceptable.\n devices: connect_device uri in list.\n firing_time: Game starts taking time, this value should be set larger in old machine.\n logdir: log dir for script report, default is None for no log, set to ``True`` for ``<basedir>/log``.\n project_root: Project root dir for `using` api.\n compress: The compression rate of the screenshot image, integer in range [1, 99], default is 10\n\n Examples:\n auto_setup(__file__)\n auto_setup(__file__, devices=[\"Android://127.0.0.1:5037/SJE5T17B17\"],\n ... logdir=True, project_root=r\"D:\\\\test\\\\logs\", compress=90)\n \"\"\"\n if basedir:\n if os.path.isfile(basedir):\n basedir = os.path.dirname(basedir)\n if basedir not in G.BASEDIR:\n G.BASEDIR.append(basedir)\n if devices:\n startup_time = Timer(firing_time).start()\n for dev in devices:\n while not startup_time.reached():\n try:\n connect_device(dev)\n break\n except ElementNotFoundError:\n time.sleep(3)\n if startup_time.reached():\n raise NotRunningError(dev)\n if logdir:\n logdir = script_log_dir(basedir, logdir)\n set_logdir(logdir)\n if project_root:\n ST.PROJECT_ROOT = project_root\n if compress:\n ST.SNAPSHOT_QUALITY = compress\n\n \"\"\"\n Device Operations\n \"\"\"\n\n @staticmethod\n def app_is_running() -> bool:\n \"\"\"\n Platforms:\n Windows\n\n Returns:\n Whether app is running\n \"\"\"\n return G.DEVICE.app_is_running()\n\n @staticmethod\n def stop_app(package=None):\n \"\"\"\n Stop the target application on device\n\n Return:\n Has the Windows application stopped, on Android and iOS no return.\n\n Platforms:\n Android, iOS, Windows\n\n Example:\n stop_app(\"com.netease.cloudmusic\")\n stop_app() # only test on Windows\n \"\"\"\n return G.DEVICE.stop_app(package)\n\n @staticmethod\n @logwrap\n def touch(\n v: Template or tuple,\n times: int = 1,\n blind: bool = False,\n interval: float = 0.05,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr,\n **kwargs\n ) -> tuple:\n \"\"\"\n Perform the touch action on the device screen\n\n Args:\n v: Target to touch, either a ``ImageTemplate`` instance or absolute coordinates (x, y).\n times: How many touches to be performed\n blind: Whether to recognize Template, sometimes we only need to click without caring about the image.\n interval: Time interval between two touches.\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n **kwargs: Platform specific `kwargs`, please refer to corresponding docs.\n\n Returns:\n Final position to be clicked, e.g. 
(100, 100)\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n Click absolute coordinates:\n touch((100, 100))\n Click 2 times:\n touch((100, 100), times=2)\n Under Android and Windows platforms, you can set the click duration:\n touch((100, 100), duration=2)\n Right click(Windows):\n touch((100, 100), right_click=True)\n \"\"\"\n if isinstance(v, Template):\n if blind:\n center_pos = (v.area[2] + v.area[0]) / 2, (v.area[3] + v.area[1]) / 2\n else:\n center_pos = loop_find(v, timeout=ST.FIND_TIMEOUT, cls=cls, ocr_mode=ocr_mode)\n\n h = v.height * v.ratio()\n w = v.width * v.ratio() # actual height and width of target in screen\n pos = random_rectangle_point(center_pos, h, w)\n else:\n try_log_screen()\n pos = v\n for _ in range(times):\n G.DEVICE.touch(pos, **kwargs)\n time.sleep(interval)\n delay_after_operation()\n return pos\n\n @logwrap\n def find_click(\n self,\n rec_template: Template,\n touch_template: Template = None,\n times: int = 1,\n timeout: float = 1,\n blind: bool = False,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr\n ) -> bool:\n \"\"\"\n Find the template image and click it or another image area.\n\n Args:\n rec_template: \"Template\" instance to be found.\n touch_template: \"ImageTemplate\" instance to be clicked, defaults to None which means click rec_template.\n times: How many touches to be performed.\n timeout: Time interval to wait for the match.\n blind: Whether to recognize Template, same as parameter of touch().\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Returns:\n bool: Whether the target image appear and click it.\n \"\"\"\n try:\n pos = self.wait(rec_template, timeout=timeout, ocr_mode=ocr_mode, cls=cls)\n h = rec_template.height * rec_template.ratio()\n w = rec_template.width * rec_template.ratio() # actual height and width of target in screen\n pos = random_rectangle_point(pos, h, w)\n except TargetNotFoundError:\n return False\n\n if touch_template:\n self.touch(touch_template, times, blind, ocr_mode=ocr_mode, cls=cls)\n logger.info((f\"Click{pos} {times} times\" if times > 1 else f\"Click{pos}\") + f\" @{touch_template.name}\")\n else:\n self.touch(pos, times)\n logger.info((f\"Click{pos} {times} times\" if times > 1 else f\"Click{pos}\") + f\" @{rec_template.name}\")\n return True\n\n @staticmethod\n @logwrap\n def exists(v: Template, timeout: float = 0, ocr_mode: int = 0, cls: Type[Ocr] = Ocr) -> bool or tuple:\n \"\"\"\n Check whether given target exists on device screen\n\n Args:\n v: target to be checked\n timeout: time limit, default is 0 which means loop_find will only search once\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Returns:\n False if target is not found, otherwise returns the coordinates of the target\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n if exists(ImageTemplate(r\"tpl1606822430589.png\")):\n touch(ImageTemplate(r\"tpl1606822430589.png\"))\n\n Since ``exists()`` will return the coordinates,\n we can directly click on this return value to reduce one image search:\n\n pos = exists(ImageTemplate(r\"tpl1606822430589.png\"))\n if pos:\n touch(pos)\n \"\"\"\n try:\n pos = loop_find(v, timeout=timeout, ocr_mode=ocr_mode, cls=cls)\n except TargetNotFoundError:\n return False\n else:\n return pos\n\n @staticmethod\n @logwrap\n def wait(\n v: Template,\n timeout: float = None,\n interval: float = 0.5,\n interval_func: Callable = None,\n ocr_mode: int = 0,\n cls: Type[Ocr] = 
Ocr\n ) -> tuple:\n \"\"\"\n Wait to match the Template on the device screen\n\n Args:\n v: target object to wait for, Template instance\n timeout: time interval to wait for the match, default is None which is ``ST.FIND_TIMEOUT``\n interval: time interval in seconds to attempt to find a match\n interval_func: called after each unsuccessful attempt to find the corresponding match\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Raises:\n TargetNotFoundError: raised if target is not found after the time limit expired\n\n Returns:\n coordinates of the matched target\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n wait(Template(r\"tpl1606821804906.png\")) # timeout after ST.FIND_TIMEOUT\n # find Template every 3 seconds, timeout after 120 seconds\n wait(Template(r\"tpl1606821804906.png\"), timeout=120, interval=3)\n\n You can specify a callback function every time the search target fails::\n\n def notfound():\n print(\"No target found\")\n wait(Template(r\"tpl1607510661400.png\"), interval_func=notfound)\n \"\"\"\n if timeout is None:\n timeout = ST.FIND_TIMEOUT\n pos = loop_find(v, timeout=timeout, interval=interval, interval_func=interval_func, ocr_mode=ocr_mode, cls=cls)\n\n return pos\n\n @staticmethod\n def swipe(\n v1: Template or tuple,\n v2: Template or tuple = None,\n vector: tuple = None,\n blind1: bool = False,\n blind2: bool = False,\n **kwargs\n ) -> tuple:\n \"\"\"\n Perform the swipe action on the device screen.\n\n There are two ways of assigning the parameters\n * ``swipe(v1, v2=Template(...))`` # swipe from v1 to v2\n * ``swipe(v1, vector=(x, y))`` # swipe starts at v1 and moves along the vector.\n\n Args:\n v1: the start point of swipe, either a Template instance or absolute coordinates (x, y)\n v2: the end point of swipe, either a Template instance or absolute coordinates (x, y)\n vector: a vector coordinates of swipe action, either absolute coordinates (x, y) or percentage of\n screen e.g.(0.5, 0.5)\n blind1: Whether to recognize Template1, same as parameter of touch().\n blind2: Whether to recognize Template2, same as parameter of touch().\n **kwargs: platform specific `kwargs`, please refer to corresponding docs\n\n Raises:\n general exception when not enough parameters to perform swap action have been provided\n\n Returns:\n Origin position and target position\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n swipe(Template(r\"tpl1606814865574.png\"), vector=[-0.0316, -0.3311])\n swipe((100, 100), (200, 200))\n\n Custom swiping duration and number of steps(Android and iOS)::\n\n # swiping lasts for 1 second, divided into 6 steps\n swipe((100, 100), (200, 200), duration=1, steps=6)\n \"\"\"\n if isinstance(v1, Template):\n if blind1:\n pos1 = (v1.area[2] + v1.area[0]) / 2, (v1.area[3] + v1.area[1]) / 2\n else:\n pos1 = loop_find(v1, timeout=ST.FIND_TIMEOUT)\n else:\n try_log_screen()\n pos1 = v1\n\n if v2:\n if isinstance(v2, Template):\n if blind2:\n pos2 = (v2.area[2] + v2.area[0]) / 2, (v2.area[3] + v2.area[1]) / 2\n else:\n pos2 = loop_find(v2, timeout=ST.FIND_TIMEOUT_TMP)\n else:\n pos2 = v2\n elif vector:\n if vector[0] <= 1 and vector[1] <= 1:\n w, h = G.DEVICE.get_current_resolution()\n vector = (int(vector[0] * w), int(vector[1] * h))\n pos2 = (pos1[0] + vector[0], pos1[1] + vector[1])\n else:\n raise ScriptError(\"no enough params for swipe\")\n\n G.DEVICE.swipe(pos1, pos2, **kwargs)\n delay_after_operation()\n logger.info(f\"Swipe {pos1} -> {pos2}\")\n return pos1, pos2\n\n 
@staticmethod\n def screenshot():\n \"\"\"\n Returns:\n Screenshot image\n \"\"\"\n return G.DEVICE.snapshot(filename=None, quality=ST.SNAPSHOT_QUALITY)\n\n @staticmethod\n def snapshot(filename=None, msg=\"\", quality=None, max_size=None):\n \"\"\"\n Returns:\n {\"screen\": filename, \"resolution\": resolution of the screen} or None\n \"\"\"\n return snapshot(filename, msg, quality, max_size)\n\n @staticmethod\n def shell(cmd):\n return shell(cmd)\n\n @staticmethod\n def start_app(package, activity=None):\n start_app(package, activity)\n\n @staticmethod\n def clear_app(package):\n clear_app(package)\n\n @staticmethod\n def install(filepath, **kwargs):\n return install(filepath, **kwargs)\n\n @staticmethod\n def uninstall(package):\n return uninstall(package)\n\n @staticmethod\n def wake():\n wake()\n\n @staticmethod\n def home():\n home()\n\n @staticmethod\n def double_click(v):\n return double_click(v)\n\n @staticmethod\n def pinch(in_or_out='in', center=None, percent=0.5):\n pinch(in_or_out, center, percent)\n\n @staticmethod\n def key_event(keyname, **kwargs):\n keyevent(keyname, **kwargs)\n\n @staticmethod\n def text(txt, enter=True, **kwargs):\n text(txt, enter, **kwargs)\n\n @staticmethod\n def sleep(secs=1.0):\n sleep(secs)\n\n @staticmethod\n def find_all(v):\n return find_all(v)\n\n @staticmethod\n def get_clipboard(*args, **kwargs):\n return get_clipboard(*args, **kwargs)\n\n @staticmethod\n def set_clipboard(content, *args, **kwargs):\n set_clipboard(content, *args, **kwargs)"
},
{
"identifier": "Ocr",
"path": "zafkiel/ocr/ocr.py",
"snippet": "class Ocr:\n # Merge results with box distance <= thres\n merge_thres_x = 0\n merge_thres_y = 0\n\n def __init__(self, button: ImageTemplate, lang=None, name=None):\n \"\"\"\n Args:\n button:\n lang: If None, use in-game language\n name: If None, use button.name\n \"\"\"\n if lang is None:\n lang = Config.SERVER_LANG\n if name is None:\n name = button.name\n\n self.button: ImageTemplate = button\n self.lang: str = lang\n self.name: str = name\n\n @cached_property\n def model(self) -> TextSystem:\n return OCR_MODEL.get_by_lang(self.lang)\n\n @staticmethod\n def pre_process(image):\n \"\"\"\n To be overridden.\n \"\"\"\n return image\n\n @staticmethod\n def after_process(result):\n \"\"\"\n To be overridden.\n \"\"\"\n return result\n\n def format_result(self, result) -> str:\n \"\"\"\n To be overridden.\n \"\"\"\n return result\n\n def ocr_single_line(self, image):\n # pre process\n start_time = time.time()\n image = crop(image, self.button.area)\n image = self.pre_process(image)\n # ocr\n result, _ = self.model.ocr_single_line(image)\n # after proces\n result = self.after_process(result)\n result = self.format_result(result)\n\n cost_time = time.time() - start_time\n logger.debug(f'OCR <{self.name}> cost {cost_time:.2f}s: {result}')\n return result\n\n def filter_detected(self, result: BoxedResult) -> bool:\n \"\"\"\n Return False to drop result.\n To be overridden.\n \"\"\"\n return True\n\n def detect_and_ocr(self, image, direct_ocr=False) -> list[BoxedResult]:\n \"\"\"\n Args:\n image:\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n\n Returns:\n\n \"\"\"\n # pre process\n start_time = time.time()\n if not direct_ocr:\n image = crop(image, self.button.area)\n image = self.pre_process(image)\n # ocr\n results: list[BoxedResult] = self.model.detect_and_ocr(image)\n # after proces\n for result in results:\n if not direct_ocr:\n result.box += self.button.area[:2]\n result.box = tuple(corner2area(result.box))\n\n results = [result for result in results if self.filter_detected(result)]\n results = merge_buttons(results, thres_x=self.merge_thres_x, thres_y=self.merge_thres_y)\n for result in results:\n result.ocr_text = self.after_process(result.ocr_text)\n\n cost_time = time.time() - start_time\n logger.debug(f\"OCR <{self.name}> cost {cost_time:.2f}s: {', '.join([result.ocr_text for result in results])}\")\n return results\n\n @staticmethod\n def _match_result(\n result: str,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True,\n ignore_digit=True):\n \"\"\"\n Args:\n result (str):\n keyword_classes: A list of `Keyword` class or classes inherited `Keyword`\n\n Returns:\n If matched, return `Keyword` object or objects inherited `Keyword`\n If not match, return None\n \"\"\"\n if not isinstance(keyword_classes, list):\n keyword_classes = [keyword_classes]\n\n # Digits will be considered as the index of keyword\n if ignore_digit:\n if result.isdigit():\n return None\n\n # Try in current lang\n for keyword_class in keyword_classes:\n try:\n matched = keyword_class.find(\n result,\n lang=lang,\n ignore_punctuation=ignore_punctuation\n )\n return matched\n except ScriptError:\n continue\n\n return None\n\n def matched_single_line(\n self,\n image,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True\n ):\n \"\"\"\n Args:\n image: Image to detect\n keyword_classes: `Keyword` class or classes inherited `Keyword`, or a list of them.\n lang:\n ignore_punctuation:\n\n Returns:\n If matched, return `Keyword` object 
or objects inherited `Keyword`\n If not match, return None\n \"\"\"\n result = self.ocr_single_line(image)\n\n result = self._match_result(\n result,\n keyword_classes=keyword_classes,\n lang=lang,\n ignore_punctuation=ignore_punctuation,\n )\n\n logger.debug(f'<{self.name}> matched: {str(result)}')\n return result\n\n def _product_button(\n self,\n boxed_result: BoxedResult,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True,\n ignore_digit=True\n ) -> OcrResultButton:\n if not isinstance(keyword_classes, list):\n keyword_classes = [keyword_classes]\n\n matched_keyword = self._match_result(\n boxed_result.ocr_text,\n keyword_classes=keyword_classes,\n lang=lang,\n ignore_punctuation=ignore_punctuation,\n ignore_digit=ignore_digit,\n )\n button = OcrResultButton(boxed_result, matched_keyword)\n return button\n\n def matched_ocr(self, image, keyword_classes, direct_ocr=False) -> list[OcrResultButton]:\n \"\"\"\n Match all instances of 'keyword_classes' on the screen.\n\n Args:\n image: Screenshot\n keyword_classes: `Keyword` class or classes inherited `Keyword`, or a list of them.\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n\n Returns:\n List of matched OcrResultButton.\n OCR result which didn't matched known keywords will be dropped.\n \"\"\"\n results = self.detect_and_ocr(image, direct_ocr=direct_ocr)\n results = [self._product_button(result, keyword_classes) for result in results]\n results = [result for result in results if result.is_keyword_matched]\n\n if results:\n logger.debug(f\"<{self.name}> matched: {', '.join([str(result) for result in results])}\")\n # else:\n # logger.debug(f\"<{self.name}> matching failed\")\n return results\n\n def ocr_match_keyword(self, image, keyword_instance, direct_ocr=False, mode: int = OCR_EQUAL, threshold=0.75) \\\n -> list[OcrResultButton]:\n \"\"\"\n Match a specified keyword instance on the screen.\n\n Args:\n image: Screenshot\n keyword_instance: Instance of `Keyword` class or its subclass.\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n mode: Match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n threshold: Similarity threshold, default 0.75, only work when mode is OCR_SIMILAR.\n\n Returns:\n List of matched OcrResultButton or empty list.\n \"\"\"\n boxed_results = self.detect_and_ocr(image, direct_ocr=direct_ocr)\n final_results = []\n for boxed_result in boxed_results:\n for keyword in keyword_instance.keywords_to_find():\n if mode == OCR_EQUAL and boxed_result.ocr_text != keyword:\n continue\n elif mode == OCR_CONTAINS and keyword not in boxed_result.ocr_text:\n continue\n elif mode == OCR_SIMILAR:\n similarity = SequenceMatcher(None, boxed_result.ocr_text, keyword).ratio()\n if similarity < threshold:\n continue\n button = OcrResultButton(boxed_result, keyword_instance)\n final_results.append(button)\n\n if final_results:\n logger.debug(f\"<{self.name}> matched: {', '.join([str(result) for result in final_results])}\")\n # else:\n # logger.debug(f\"<{self.name}> matching failed\")\n return final_results"
},
{
"identifier": "Page",
"path": "zafkiel/ui/page.py",
"snippet": "class Page:\n \"\"\"\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/page.py\n \"\"\"\n\n # Key: str, page name like \"page_main\"\n # Value: Page, page instance\n all_pages = {}\n\n @classmethod\n def clear_connection(cls):\n for page in cls.all_pages.values():\n page.parent = None\n\n @classmethod\n def init_connection(cls, destination: Page):\n \"\"\"Initialize an A* path finding among pages.\n\n Args:\n destination:\n \"\"\"\n cls.clear_connection()\n\n visited = [destination]\n visited = set(visited)\n while True:\n new = visited.copy()\n for page in visited:\n for link in cls.iter_pages():\n if link in visited:\n continue\n if page in link.links:\n link.parent = page\n new.add(link)\n if len(new) == len(visited):\n break\n visited = new\n\n @classmethod\n def iter_pages(cls, start_page: Page = None):\n pages = list(cls.all_pages.values())\n if start_page is not None and start_page in pages:\n # Move start_page to the front of the list\n pages.remove(start_page)\n pages.insert(0, start_page)\n cls.all_pages = {page.name: page for page in pages}\n return cls.all_pages.values()\n\n @classmethod\n def iter_check_buttons(cls):\n for page in cls.all_pages.values():\n yield page.check_button\n\n def __init__(self, check_button: Template, switch: Switch = None):\n self.check_button = check_button\n self.switch = switch\n self.links = {}\n (filename, line_number, function_name, text) = traceback.extract_stack()[-2]\n self.name = text[:text.find('=')].strip()\n self.parent = None\n Page.all_pages[self.name] = self\n\n def __eq__(self, other):\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def link(self, button: Template, destination: Page):\n self.links[destination] = button"
},
{
"identifier": "run_once",
"path": "zafkiel/decorator.py",
"snippet": "def run_once(f):\n \"\"\"\n From https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/base/decorator.py\n Run a function only once, no matter how many times it has been called.\n\n Examples:\n @run_once\n def my_function(foo, bar):\n return foo + bar\n\n while 1:\n my_function()\n\n Examples:\n def my_function(foo, bar):\n return foo + bar\n\n action = run_once(my_function)\n while 1:\n action()\n \"\"\"\n\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n wrapper.has_run = True\n return f(*args, **kwargs)\n\n wrapper.has_run = False\n return wrapper"
},
{
"identifier": "NotRunningError",
"path": "zafkiel/exception.py",
"snippet": "class NotRunningError(Exception):\n pass"
},
{
"identifier": "PageUnknownError",
"path": "zafkiel/exception.py",
"snippet": "class PageUnknownError(Exception):\n pass"
},
{
"identifier": "ScriptError",
"path": "zafkiel/exception.py",
"snippet": "class ScriptError(Exception):\n pass"
},
{
"identifier": "Timer",
"path": "zafkiel/timer.py",
"snippet": "class Timer:\n def __init__(self, limit, count=0):\n \"\"\"\n From https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/base/timer.py\n\n Args:\n limit (int, float): Timer limit\n count (int): Timer reach confirm count. Default to 0.\n When using a structure like this, must set a count.\n Otherwise, it goes wrong if screenshot time cost greater than limit.\n\n if self.appear(MAIN_CHECK):\n if confirm_timer.reached():\n pass\n else:\n confirm_timer.reset()\n\n Also, It's a good idea to set `count`, to make program run more stable on slow computers.\n Expected speed is 0.35 second / screenshot.\n \"\"\"\n self.limit = limit\n self.count = count\n self._current = 0\n self._reach_count = count\n\n def start(self):\n if not self.started():\n self._current = time.time()\n self._reach_count = 0\n\n return self\n\n def started(self):\n return bool(self._current)\n\n def current(self):\n \"\"\"\n Returns:\n float\n \"\"\"\n if self.started():\n return time.time() - self._current\n else:\n return 0.\n\n def set_current(self, current, count=0):\n self._current = time.time() - current\n self._reach_count = count\n\n def reached(self):\n \"\"\"\n Returns:\n bool\n \"\"\"\n self._reach_count += 1\n return time.time() - self._current > self.limit and self._reach_count > self.count\n\n def reset(self):\n self._current = time.time()\n self._reach_count = 0\n return self\n\n def clear(self):\n self._current = 0\n self._reach_count = self.count\n return self\n\n def reached_and_reset(self):\n \"\"\"\n Returns:\n bool:\n \"\"\"\n if self.reached():\n self.reset()\n return True\n else:\n return False\n\n def wait(self):\n \"\"\"\n Wait until timer reached.\n \"\"\"\n diff = self._current + self.limit - time.time()\n if diff > 0:\n time.sleep(diff)\n\n def show(self):\n logger.info(str(self))\n\n def __str__(self):\n return f'Timer(limit={round(self.current(), 3)}/{self.limit}, count={self._reach_count}/{self.count})'\n\n __repr__ = __str__"
},
{
"identifier": "Switch",
"path": "zafkiel/ui/switch.py",
"snippet": "class Switch:\n \"\"\"\n A wrapper to handle switches in game, switch among states with retries.\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py\n\n Examples:\n # Definitions\n submarine_hunt = Switch('Submarine_hunt', offset=120)\n submarine_hunt.add_state('on', check_button=Template(r\"assets/ON.png\"))\n submarine_hunt.add_state('off', check_button=Template(r\"assets/OFF.png\"))\n\n # Change state to ON\n submarine_view.set(TPL_ON)\n \"\"\"\n\n def __init__(self, name: str = 'Switch', is_selector: bool = False):\n \"\"\"\n Args:\n name:\n is_selector: True if this is a multi choice, click to choose one of the switches.\n For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] |\n False if this is a switch, click the switch itself, and it changed in the same position.\n For example: | [ON] | -> click -> | [OFF] |\n \"\"\"\n self.name = name\n self.is_choice = is_selector\n self.state_list = []\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def add_state(self, state: str, check_button: Template, click_button: Template = None):\n \"\"\"\n Args:\n state: Must match check_button.name\n check_button:\n click_button:\n \"\"\"\n self.state_list.append({\n 'state': state,\n 'check_button': check_button,\n 'click_button': click_button if click_button is not None else check_button,\n })\n\n def get_data(self, state: Template) -> dict:\n \"\"\"\n Args:\n state:\n\n Returns:\n Dictionary in add_state\n\n Raises:\n ScriptError: If state invalid\n \"\"\"\n for row in self.state_list:\n if row['state'] == state.name:\n return row\n\n raise ScriptError(f'Switch {self.name} received an invalid state {state}')"
}
] | from zafkiel.device.template import ImageTemplate as Template
from zafkiel.logger import logger
from zafkiel.device.api import API
from zafkiel.ocr.ocr import Ocr
from zafkiel.ui.page import Page
from zafkiel.decorator import run_once
from zafkiel.exception import NotRunningError, PageUnknownError, ScriptError
from zafkiel.timer import Timer
from zafkiel.ui.switch import Switch | 9,446 |
class UI(API):
"""
Processing interface related functions.
Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py
and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
"""
# Make ui_current mutable so that it can be shared among subclasses of the UI class.
ui_current: dict = {'page': None}
popup_list: list = []
def ui_switch_appear(self, switch: Switch) -> bool:
"""
Args:
switch:
"""
if self.ui_get_current_page().switch != switch:
return False
for data in switch.state_list:
if self.exists(data['check_button']):
return True
return False
def ui_get_current_state(self, switch: Switch) -> str:
"""
Args:
switch:
Returns:
state name or 'unknown'.
"""
if self.ui_current['page'].switch != switch:
logger.warning(f"{self.ui_current['page']} does not have {switch}")
return 'unknown'
for data in switch.state_list:
if self.exists(data['check_button']):
return data['state']
return 'unknown'
def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:
"""
Args:
page:
timeout: Seconds to find.
Returns:
If found, return tuple of (x, y), else return False.
"""
return self.exists(page.check_button, timeout)
def ui_get_current_page(self):
"""
Returns:
Page:
Raises:
NotRunningError:
PageUnknownError:
"""
@run_once
def app_check():
if not self.app_is_running():
raise NotRunningError("Game not running")
timeout = Timer(10, count=20).start()
while True:
# End
if timeout.reached():
break
# Known pages
for page in Page.iter_pages():
if page.check_button is None:
continue
if self.ui_page_appear(page=page):
self.ui_current['page'] = page
return page
# Unknown page but able to handle
if self.ui_additional():
timeout.reset()
continue
app_check()
# Unknown page, need manual switching
|
class UI(API):
"""
Processing interface related functions.
Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py
and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
"""
# Make ui_current mutable so that it can be shared among subclasses of the UI class.
ui_current: dict = {'page': None}
popup_list: list = []
def ui_switch_appear(self, switch: Switch) -> bool:
"""
Args:
switch:
"""
if self.ui_get_current_page().switch != switch:
return False
for data in switch.state_list:
if self.exists(data['check_button']):
return True
return False
def ui_get_current_state(self, switch: Switch) -> str:
"""
Args:
switch:
Returns:
state name or 'unknown'.
"""
if self.ui_current['page'].switch != switch:
logger.warning(f"{self.ui_current['page']} does not have {switch}")
return 'unknown'
for data in switch.state_list:
if self.exists(data['check_button']):
return data['state']
return 'unknown'
def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:
"""
Args:
page:
timeout: Seconds to find.
Returns:
If found, return tuple of (x, y), else return False.
"""
return self.exists(page.check_button, timeout)
def ui_get_current_page(self):
"""
Returns:
Page:
Raises:
NotRunningError:
PageUnknownError:
"""
@run_once
def app_check():
if not self.app_is_running():
raise NotRunningError("Game not running")
timeout = Timer(10, count=20).start()
while True:
# End
if timeout.reached():
break
# Known pages
for page in Page.iter_pages():
if page.check_button is None:
continue
if self.ui_page_appear(page=page):
self.ui_current['page'] = page
return page
# Unknown page but able to handle
if self.ui_additional():
timeout.reset()
continue
app_check()
# Unknown page, need manual switching | raise PageUnknownError | 7 | 2023-11-12 09:33:35+00:00 | 12k |
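The row above ends with its remaining fields run together after the code: the gold next line (`raise PageUnknownError`, which directly continues the `ui_get_current_page` method shown), an integer index, a timestamp, and a size bucket. The sketch below is a minimal, assumed illustration of how such a row could be rebuilt into a next-line-completion prompt and scored; the dictionary keys (`context`, `import_statement`, `cropped_code`, `next_line`) and the exact-match scoring rule are assumptions for illustration, not part of the dump.

```python
# Minimal sketch (not part of the dataset): rebuild a completion prompt from one
# parsed row and check the model's first predicted line against the gold line.
# The key names used here are assumptions about how a row would be parsed.
from typing import Callable, Dict


def build_prompt(row: Dict) -> str:
    """Concatenate the context snippets, the import block and the cropped code."""
    context_block = "\n\n".join(
        f"# {item['path']}\n{item['snippet']}" for item in row["context"]
    )
    return f"{context_block}\n\n{row['import_statement']}\n{row['cropped_code']}\n"


def next_line_matches(row: Dict, complete_fn: Callable[[str], str]) -> bool:
    """Return True if the first non-empty predicted line equals the gold next line."""
    lines = [ln.strip() for ln in complete_fn(build_prompt(row)).splitlines() if ln.strip()]
    predicted = lines[0] if lines else ""
    return predicted == row["next_line"].strip()
```

In practice one would also cap the assembled prompt to the model's context window, which is presumably what the token count and the cropped code field are for.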
medkit-lib/medkit | medkit/io/doccano.py | [
{
"identifier": "Attribute",
"path": "medkit/core/attribute.py",
"snippet": "class Attribute(dict_conv.SubclassMapping):\n \"\"\"\n Medkit attribute, to be added to an annotation\n\n Attributes\n ----------\n label:\n The attribute label\n value:\n The value of the attribute. Should be either simple built-in types (int,\n float, bool, str) or collections of these types (list, dict, tuple). If\n you need structured complex data you should create a subclass of\n `Attribute`.\n metadata:\n The metadata of the attribute\n uid:\n The identifier of the attribute\n \"\"\"\n\n label: str\n value: Optional[Any]\n metadata: Dict[str, Any]\n uid: str\n\n def __init__(\n self,\n label: str,\n value: Optional[Any] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n ):\n if metadata is None:\n metadata = {}\n if uid is None:\n uid = generate_id()\n\n self.uid = uid\n self.label = label\n self.value = value\n self.metadata = metadata\n\n def __init_subclass__(cls):\n Attribute.register_subclass(cls)\n super().__init_subclass__()\n\n def to_dict(self) -> Dict[str, Any]:\n attribute_dict = dict(\n uid=self.uid,\n label=self.label,\n value=self.value,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, attribute_dict)\n return attribute_dict\n\n def to_brat(self) -> Optional[Any]:\n \"\"\"\n Return a value compatible with the brat format\n \"\"\"\n\n return self.value\n\n def to_spacy(self) -> Optional[Any]:\n \"\"\"\n Return a value compatible with spaCy\n \"\"\"\n\n return self.value\n\n def copy(self) -> Attribute:\n \"\"\"\n Create a new attribute that is a copy of the current instance, but\n with a new identifier\n\n This is used when we want to duplicate an existing attribute onto a\n different annotation.\n \"\"\"\n return dataclasses.replace(self, uid=generate_id())\n\n @classmethod\n def from_dict(cls, attribute_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates an Attribute from a dict\n\n Parameters\n ----------\n attribute_dict: dict\n A dictionary from a serialized Attribute as generated by to_dict()\n \"\"\"\n\n subclass = cls.get_subclass_for_data_dict(attribute_dict)\n if subclass is not None:\n return subclass.from_dict(attribute_dict)\n\n return cls(\n uid=attribute_dict[\"uid\"],\n label=attribute_dict[\"label\"],\n value=attribute_dict[\"value\"],\n metadata=attribute_dict[\"metadata\"],\n )"
},
{
"identifier": "OperationDescription",
"path": "medkit/core/operation_desc.py",
"snippet": "class OperationDescription:\n \"\"\"Description of a specific instance of an operation\n\n Parameters\n ----------\n uid:\n The unique identifier of the instance described\n name:\n The name of the operation. Can be the same as `class_name` or something\n more specific, for operations with a behavior that can be customized\n (for instance a rule-based entity matcher with user-provided rules, or a\n model-based entity matcher with a user-provided model)\n class_name:\n The name of the class of the operation\n config:\n The specific configuration of the instance\n \"\"\"\n\n uid: str\n name: str\n class_name: Optional[str] = None\n config: Dict[str, Any] = dataclasses.field(default_factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n return dict(uid=self.uid, name=self.name, class_name=self.class_name, config=self.config)"
},
{
"identifier": "ProvTracer",
"path": "medkit/core/prov_tracer.py",
"snippet": "class ProvTracer:\n \"\"\"Provenance tracing component.\n\n `ProvTracer` is intended to gather provenance information about how all data\n generated by medkit. For each data item (for instance an annotation or an\n attribute), `ProvTracer` can tell the operation that created it, the data\n items that were used to create it, and reciprocally, the data items that were\n derived from it (cf. :class:`~Prov`).\n\n Provenance-compatible operations should inform the provenance tracer of each\n data item that through the :meth:`~.add_prov` method.\n\n Users wanting to gather provenance information should instantiate one unique\n `ProvTracer` object and provide it to all operations involved in their data\n processing flow. Once all operations have been executed, they may then\n retrieve provenance info for specific data items through\n :meth:`~.get_prov`, or for all items with :meth:`~.get_provs`.\n\n Composite operations relying on inner operations (such as pipelines)\n shouldn't call :meth:`~.add_prov` method. Instead, they should instantiate\n their own internal `ProvTracer` and provide it to the operations they rely\n on, then use :meth:`~.add_prov_from_sub_tracer` to integrate\n information from this internal sub-provenance tracer into the main\n provenance tracer that was provided to them.\n\n This will build sub-provenance information, that can be retrieved later\n through :meth:`~.get_sub_prov_tracer` or :meth:`~.get_sub_prov_tracers`. The\n inner operations of a composite operation can themselves be composite\n operations, leading to a tree-like structure of nested provenance tracers.\n \"\"\"\n\n def __init__(self, store: Optional[ProvStore] = None, _graph: Optional[ProvGraph] = None):\n \"\"\"\n Parameters\n ----------\n store:\n Store that will contain all traced data items.\n \"\"\"\n if store is None:\n store = create_prov_store()\n if _graph is None:\n _graph = ProvGraph()\n\n self.store: ProvStore = store\n self._graph: ProvGraph = _graph\n\n def add_prov(\n self,\n data_item: IdentifiableDataItem,\n op_desc: OperationDescription,\n source_data_items: List[IdentifiableDataItem],\n ):\n \"\"\"\n Append provenance information about a specific data item.\n\n Parameters\n ----------\n data_item:\n Data item that was created.\n op_desc:\n Description of the operation that created the data item.\n source_data_items:\n Data items that were used by the operation to create the data item.\n \"\"\"\n assert not self._graph.has_node(\n data_item.uid\n ), f\"Provenance of data item with identifier {data_item.uid} was already added\"\n\n self.store.store_data_item(data_item)\n self.store.store_op_desc(op_desc)\n # add source data items to store\n for source_data_item in source_data_items:\n self.store.store_data_item(source_data_item)\n\n # add node to graph\n source_ids = [s.uid for s in source_data_items]\n self._graph.add_node(data_item.uid, op_desc.uid, source_ids)\n\n def add_prov_from_sub_tracer(\n self,\n data_items: List[IdentifiableDataItem],\n op_desc: OperationDescription,\n sub_tracer: ProvTracer,\n ):\n \"\"\"Append provenance information about data items created by a composite\n operation relying on inner operations (such as a pipeline) having its\n own internal sub-provenance tracer.\n\n Parameters\n ----------\n data_items:\n Data items created by the composite operation. 
Should not include\n internal intermediate data items, only the output of the operation.\n op_desc:\n Description of the composite operation that created the data items.\n sub_tracer:\n Internal sub-provenance tracer of the composite operation.\n \"\"\"\n assert self.store is sub_tracer.store\n self.store.store_op_desc(op_desc)\n\n sub_graph = sub_tracer._graph\n self._graph.add_sub_graph(op_desc.uid, sub_graph)\n\n for data_item in data_items:\n # ignore data items already known\n # (can happen with attributes being copied from one annotation to another)\n if self._graph.has_node(data_item.uid):\n # check operation_id is consistent\n node = self._graph.get_node(data_item.uid)\n if node.operation_id != op_desc.uid:\n raise RuntimeError(\n \"Trying to add provenance for sub graph for data item with uid\"\n f\" {data_item.uid} that already has a node, but with different\"\n \" operation_id\"\n )\n continue\n self._add_prov_from_sub_tracer_for_data_item(data_item.uid, op_desc.uid, sub_graph)\n\n def _add_prov_from_sub_tracer_for_data_item(\n self,\n data_item_id: str,\n operation_id: str,\n sub_graph: ProvGraph,\n ):\n assert not self._graph.has_node(data_item_id)\n assert sub_graph.has_node(data_item_id)\n\n # find source ids\n source_ids = []\n seen = set()\n queue = collections.deque([data_item_id])\n while queue:\n sub_graph_node_id = queue.popleft()\n seen.add(sub_graph_node_id)\n\n sub_graph_node = sub_graph.get_node(sub_graph_node_id)\n if sub_graph_node.operation_id is None:\n source_ids.append(sub_graph_node_id)\n queue.extend(uid for uid in sub_graph_node.source_ids if uid not in seen)\n\n # add new node on main graph representing\n # the data item generation by the composed operation\n self._graph.add_node(data_item_id, operation_id, source_ids)\n\n def has_prov(self, data_item_id: str) -> bool:\n \"\"\"Check if the provenance tracer has provenance information about a\n specific data item.\n\n .. note::\n This will return `False` if we have provenance info about a data\n item but only in a sub-provenance tracer.\n\n Parameters\n ----------\n data_item_id:\n Id of the data item.\n\n Returns\n -------\n bool\n `True` if there is provenance info that can be retrieved with\n :meth:`~get_prov()`.\n \"\"\"\n return self._graph.has_node(data_item_id)\n\n def get_prov(self, data_item_id: str) -> Prov:\n \"\"\"Return provenance information about a specific data item.\n\n Parameters\n ----------\n data_item_id:\n Id of the data item.\n\n Returns\n -------\n Prov\n Provenance info about the data item.\n \"\"\"\n if not self._graph.has_node(data_item_id):\n raise ValueError(\n f\"No provenance info available for data item with id {data_item_id}.\"\n \" Make sure the id is valid and provenance tracking was enabled for\"\n \" the operation that generated it.\"\n )\n\n node = self._graph.get_node(data_item_id)\n return self._build_prov_from_node(node)\n\n def get_provs(self) -> List[Prov]:\n \"\"\"Return all provenance information about all data items known to the tracer.\n\n .. note::\n Nested provenance info from sub-provenance tracers will not be returned.\n\n Returns\n -------\n List[Prov]\n Provenance info about all known data items.\n \"\"\"\n return [self._build_prov_from_node(node) for node in self._graph.get_nodes()]\n\n def has_sub_prov_tracer(self, operation_id: str) -> bool:\n \"\"\"Check if the provenance tracer has a sub-provenance tracer for a\n specific composite operation (such as a pipeline).\n\n .. 
note::\n This will return `False` if there is a sub-provenance tracer for\n the operation but that is not a direct child (i.e. that is deeper\n in the hierarchy).\n\n Parameters\n -----------\n operation_id:\n Id of the composite operation.\n\n Returns\n -------\n bool\n `True` if there is a sub-provenance tracer for the operation.\n \"\"\"\n return self._graph.has_sub_graph(operation_id)\n\n def get_sub_prov_tracer(self, operation_id: str) -> ProvTracer:\n \"\"\"Return a sub-provenance tracer containing sub-provenance information from a\n specific composite operation.\n\n Parameters\n ----------\n operation_id:\n Id of the composite operation.\n\n Returns\n -------\n ProvTracer\n The sub-provenance tracer containing sub-provenance information from the\n operation.\n \"\"\"\n sub_graph = self._graph.get_sub_graph(operation_id)\n return ProvTracer(store=self.store, _graph=sub_graph)\n\n def get_sub_prov_tracers(self) -> List[ProvTracer]:\n \"\"\"\n Return all sub-provenance tracers of the provenance tracer.\n\n .. note::\n This will not return sub-provenance tracers that are not direct\n children of this tracer (i.e. that are deeper in the hierarchy).\n\n Returns\n -------\n List[ProvTracer]\n All sub-provenance tracers of this provenance tracer.\n \"\"\"\n return [ProvTracer(store=self.store, _graph=sub_graph) for sub_graph in self._graph.get_sub_graphs()]\n\n def _build_prov_from_node(self, node: ProvNode):\n data_item = self.store.get_data_item(node.data_item_id)\n op_desc = self.store.get_op_desc(node.operation_id) if node.operation_id is not None else None\n source_data_items = [self.store.get_data_item(uid) for uid in node.source_ids]\n derived_data_items = [self.store.get_data_item(uid) for uid in node.derived_ids]\n return Prov(data_item, op_desc, source_data_items, derived_data_items)"
},
{
"identifier": "generate_deterministic_id",
"path": "medkit/core/id.py",
"snippet": "def generate_deterministic_id(reference_id: str) -> uuid.UUID:\n \"\"\"Generate a deterministic UUID based on reference_id.\n The generated UUID will be the same if the reference_id is the same.\n\n Parameters\n ----------\n reference_id\n A string representation of an UID\n\n Returns\n -------\n uuid.UUID\n The UUID object\n \"\"\"\n rng = random.Random(reference_id)\n uid = uuid.UUID(int=rng.getrandbits(128))\n return uid"
},
{
"identifier": "generate_id",
"path": "medkit/core/id.py",
"snippet": "def generate_id() -> str:\n return str(uuid.uuid1())"
},
{
"identifier": "span_utils",
"path": "medkit/core/text/span_utils.py",
"snippet": "def _spans_have_same_length_as_text(text, spans):\ndef _lists_have_same_dimension(list_1, list_2):\ndef _list_is_sorted(list_1):\ndef _ranges_are_within_text(text, ranges):\ndef _positions_are_within_text(text, positions):\ndef replace(\n text: str,\n spans: List[AnySpan],\n ranges: List[Tuple[int, int]],\n replacement_texts: List[str],\n) -> Tuple[str, List[AnySpan]]:\ndef _replace_in_spans(spans, ranges, replacement_lengths):\ndef remove(\n text: str,\n spans: List[AnySpan],\n ranges: List[Tuple[int, int]],\n) -> Tuple[str, List[AnySpan]]:\ndef _remove_in_spans(spans, ranges):\ndef extract(\n text: str,\n spans: List[AnySpan],\n ranges: List[Tuple[int, int]],\n) -> Tuple[str, List[AnySpan]]:\ndef _extract_in_spans(spans, ranges):\ndef insert(\n text: str,\n spans: List[AnySpan],\n positions: List[int],\n insertion_texts: List[str],\n) -> Tuple[str, List[AnySpan]]:\ndef _insert_in_spans(spans, positions, insertion_lengths):\ndef move(\n text: str,\n spans: List[AnySpan],\n range: Tuple[int, int],\n destination: int,\n) -> Tuple[str, List[AnySpan]]:\ndef _move_in_spans(spans, range, destination):\ndef concatenate(texts: List[str], all_spans: List[List[AnySpan]]) -> Tuple[str, List[AnySpan]]:\ndef normalize_spans(spans: List[AnySpan]) -> List[Span]:\ndef clean_up_gaps_in_normalized_spans(spans: List[Span], text: str, max_gap_length: int = 3):"
},
{
"identifier": "Entity",
"path": "medkit/core/text/annotation.py",
"snippet": "class Entity(Segment):\n \"\"\"\n Text entity referencing part of an :class:`~medkit.core.text.TextDocument`.\n\n Attributes\n ----------\n uid:\n The entity identifier.\n label:\n The label for this entity (e.g., DISEASE)\n text:\n Text of the entity.\n spans:\n List of spans indicating which parts of the entity text correspond to\n which part of the document's full text.\n attrs:\n Attributes of the entity. Stored in a\n :class:{~medkit.core.EntityAttributeContainer} but can be passed as a list at\n init.\n metadata:\n The metadata of the entity\n keys:\n Pipeline output keys to which the entity belongs to.\n \"\"\"\n\n attrs: EntityAttributeContainer\n\n def __init__(\n self,\n label: str,\n text: str,\n spans: List[AnySpan],\n attrs: Optional[List[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n store: Optional[Store] = None,\n attr_container_class: Type[EntityAttributeContainer] = EntityAttributeContainer,\n ):\n super().__init__(label, text, spans, attrs, metadata, uid, store, attr_container_class)"
},
{
"identifier": "Relation",
"path": "medkit/core/text/annotation.py",
"snippet": "class Relation(TextAnnotation):\n \"\"\"\n Relation between two text entities.\n\n Attributes\n ----------\n uid:\n The identifier of the relation\n label:\n The relation label\n source_id:\n The identifier of the entity from which the relation is defined\n target_id:\n The identifier of the entity to which the relation is defined\n attrs:\n The attributes of the relation\n metadata:\n The metadata of the relation\n keys:\n Pipeline output keys to which the relation belongs to\n \"\"\"\n\n source_id: str\n target_id: str\n\n def __init__(\n self,\n label: str,\n source_id: str,\n target_id: str,\n attrs: Optional[List[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n store: Optional[Store] = None,\n attr_container_class: Type[AttributeContainer] = AttributeContainer,\n ):\n super().__init__(\n label=label,\n attrs=attrs,\n metadata=metadata,\n uid=uid,\n attr_container_class=attr_container_class,\n )\n\n self.source_id = source_id\n self.target_id = target_id\n\n def to_dict(self) -> Dict[str, Any]:\n attrs = [a.to_dict() for a in self.attrs]\n relation_dict = dict(\n uid=self.uid,\n label=self.label,\n source_id=self.source_id,\n target_id=self.target_id,\n attrs=attrs,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, relation_dict)\n return relation_dict\n\n @classmethod\n def from_dict(cls, relation_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates a Relation from a dict\n\n Parameters\n ----------\n relation_dict: dict\n A dictionary from a serialized relation as generated by to_dict()\n \"\"\"\n\n attrs = [Attribute.from_dict(a) for a in relation_dict[\"attrs\"]]\n return cls(\n uid=relation_dict[\"uid\"],\n label=relation_dict[\"label\"],\n source_id=relation_dict[\"source_id\"],\n target_id=relation_dict[\"target_id\"],\n attrs=attrs,\n metadata=relation_dict[\"metadata\"],\n )"
},
{
"identifier": "TextDocument",
"path": "medkit/core/text/document.py",
"snippet": "class TextDocument(dict_conv.SubclassMapping):\n \"\"\"\n Document holding text annotations\n\n Annotations must be subclasses of `TextAnnotation`.\n\n Attributes\n ----------\n uid:\n Unique identifier of the document.\n text:\n Full document text.\n anns:\n Annotations of the document. Stored in an\n :class:`~.text.TextAnnotationContainer` but can be passed as a list at init.\n attrs:\n Attributes of the document. Stored in an\n :class:`~.core.AttributeContainer` but can be passed as a list at init\n metadata:\n Document metadata.\n raw_segment:\n Auto-generated segment containing the full unprocessed document text. To\n get the raw text as an annotation to pass to processing operations:\n\n >>> doc = TextDocument(text=\"hello\")\n >>> raw_text = doc.anns.get(label=TextDocument.RAW_LABEL)[0]\n \"\"\"\n\n RAW_LABEL: ClassVar[str] = \"RAW_TEXT\"\n\n uid: str\n anns: TextAnnotationContainer\n attrs: AttributeContainer\n metadata: Dict[str, Any]\n raw_segment: Segment\n\n def __init__(\n self,\n text: str,\n anns: Optional[Sequence[TextAnnotation]] = None,\n attrs: Optional[Sequence[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n ):\n if anns is None:\n anns = []\n if attrs is None:\n attrs = []\n if metadata is None:\n metadata = {}\n if uid is None:\n uid = generate_id()\n\n self.uid = uid\n self.metadata = metadata\n\n # auto-generated raw segment to hold the text\n self.raw_segment = self._generate_raw_segment(text, uid)\n\n self.anns = TextAnnotationContainer(doc_id=self.uid, raw_segment=self.raw_segment)\n for ann in anns:\n self.anns.add(ann)\n\n self.attrs = AttributeContainer(\n owner_id=self.uid,\n )\n\n for attr in attrs:\n self.attrs.add(attr)\n\n @classmethod\n def _generate_raw_segment(cls, text: str, doc_id: str) -> Segment:\n uid = str(generate_deterministic_id(reference_id=doc_id))\n\n return Segment(\n label=cls.RAW_LABEL,\n spans=[Span(0, len(text))],\n text=text,\n uid=uid,\n )\n\n @property\n def text(self) -> str:\n return self.raw_segment.text\n\n def __init_subclass__(cls):\n TextDocument.register_subclass(cls)\n super().__init_subclass__()\n\n def to_dict(self, with_anns: bool = True) -> Dict[str, Any]:\n doc_dict = dict(\n uid=self.uid,\n text=self.text,\n metadata=self.metadata,\n )\n if with_anns:\n doc_dict[\"anns\"] = [a.to_dict() for a in self.anns]\n\n if self.attrs:\n doc_dict[\"attrs\"] = [a.to_dict() for a in self.attrs]\n\n dict_conv.add_class_name_to_data_dict(self, doc_dict)\n return doc_dict\n\n @classmethod\n def from_dict(cls, doc_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates a TextDocument from a dict\n\n Parameters\n ----------\n doc_dict: dict\n A dictionary from a serialized TextDocument as generated by to_dict()\n \"\"\"\n\n # if class method is not the same as the TextDocument one\n # (e.g., when subclassing with an overriding method)\n subclass = cls.get_subclass_for_data_dict(doc_dict)\n if subclass is not None:\n return subclass.from_dict(doc_dict)\n\n anns = [TextAnnotation.from_dict(a) for a in doc_dict.get(\"anns\", [])]\n attrs = [Attribute.from_dict(a) for a in doc_dict.get(\"attrs\", [])]\n return cls(\n uid=doc_dict[\"uid\"],\n text=doc_dict[\"text\"],\n anns=anns,\n attrs=attrs,\n metadata=doc_dict[\"metadata\"],\n )\n\n @classmethod\n def from_file(cls, path: os.PathLike, encoding: Optional[str] = \"utf-8\") -> Self:\n \"\"\"\n Create a document from a text file\n\n Parameters\n ----------\n path:\n Path of the text file\n encoding:\n Text encoding to use\n\n Returns\n 
-------\n TextDocument:\n Text document with contents of `path` as text. The file path is\n included in the document metadata.\n \"\"\"\n\n path = Path(path)\n text = path.read_text(encoding=encoding)\n return cls(text=text, metadata={\"path_to_text\": str(path.absolute())})\n\n @classmethod\n def from_dir(\n cls,\n path: os.PathLike,\n pattern: str = \"*.txt\",\n encoding: Optional[str] = \"utf-8\",\n ) -> List[Self]:\n \"\"\"\n Create documents from text files in a directory\n\n Parameters\n ----------\n path:\n Path of the directory containing text files\n pattern:\n Glob pattern to match text files in `path`\n encoding:\n Text encoding to use\n\n Returns\n -------\n List[TextDocument]:\n Text documents with contents of each file as text\n \"\"\"\n\n path = Path(path)\n files = sorted(path.glob(pattern))\n return [cls.from_file(f, encoding) for f in files]\n\n def get_snippet(self, segment: Segment, max_extend_length: int) -> str:\n \"\"\"Return a portion of the original text containing the annotation\n\n Parameters\n ----------\n segment:\n The annotation\n\n max_extend_length:\n Maximum number of characters to use around the annotation\n\n Returns\n -------\n str:\n A portion of the text around the annotation\n \"\"\"\n spans_normalized = span_utils.normalize_spans(segment.spans)\n start = min(s.start for s in spans_normalized)\n end = max(s.end for s in spans_normalized)\n start_extended = max(start - max_extend_length // 2, 0)\n remaining_max_extend_length = max_extend_length - (start - start_extended)\n end_extended = min(end + remaining_max_extend_length, len(self.text))\n return self.text[start_extended:end_extended]"
},
{
"identifier": "Span",
"path": "medkit/core/text/span.py",
"snippet": "class Span(AnySpan):\n \"\"\"\n Slice of text extracted from the original text\n\n Parameters\n ----------\n start: int\n Index of the first character in the original text\n end: int\n Index of the last character in the original text, plus one\n \"\"\"\n\n start: int\n end: int\n\n @property\n def length(self):\n return self.end - self.start\n\n def to_dict(self) -> Dict[str, Any]:\n span_dict = dict(start=self.start, end=self.end)\n dict_conv.add_class_name_to_data_dict(self, span_dict)\n return span_dict\n\n def overlaps(self, other: Span):\n \"\"\"Test if 2 spans reference at least one character in common\"\"\"\n return (self.start < other.end) and (self.end > other.start)\n\n @classmethod\n def from_dict(cls, span_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates a Span from a dict\n\n Parameters\n ----------\n span_dict: dict\n A dictionary from a serialized span as generated by to_dict()\n \"\"\"\n return cls(start=span_dict[\"start\"], end=span_dict[\"end\"])"
},
{
"identifier": "get_anns_by_type",
"path": "medkit/io/_common.py",
"snippet": "def get_anns_by_type(medkit_doc: TextDocument, anns_labels: Optional[List[str]] = None) -> Dict[str, TextAnnotation]:\n \"\"\"Filter annotations by labels and return a dictionary by type of annotation.\n\n Parameters\n ----------\n medkit_doc:\n Text document with annotations\n anns_labels:\n Labels to filter annotations.\n If not provided, all annotations will be in the dictionary\n\n Returns\n -------\n Dict[str, TextAnnotation]\n Annotations by type: 'entities', 'relations', and 'segments'.\n\n \"\"\"\n anns_by_type = {\"entities\": [], \"relations\": [], \"segments\": []}\n annotations = medkit_doc.anns.get()\n\n if anns_labels is not None:\n # filter annotations by label\n annotations = [ann for ann in annotations if ann.label in anns_labels]\n if anns_labels and annotations == []:\n # labels_anns were a list but none of the annotations\n # had a label of interest\n labels_str = \",\".join(anns_labels)\n logger.info(f\"No medkit annotations were included because none have '{labels_str}'\" \" as label.\")\n\n for ann in annotations:\n if isinstance(ann, Entity):\n anns_by_type[\"entities\"].append(ann)\n elif isinstance(ann, Relation):\n anns_by_type[\"relations\"].append(ann)\n elif isinstance(ann, Segment):\n anns_by_type[\"segments\"].append(ann)\n return anns_by_type"
}
] | import dataclasses
import enum
import json
import logging
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from zipfile import ZipFile
from typing_extensions import Self
from medkit.core import Attribute, OperationDescription, ProvTracer
from medkit.core.id import generate_deterministic_id, generate_id
from medkit.core.text import Entity, Relation, Span, TextDocument, span_utils
from medkit.io._common import get_anns_by_type | 9,460 | """Convert medkit files to doccano files (.JSONL) for a given task.
For each :class:`~medkit.core.text.TextDocument` a jsonline will be created.
"""
def __init__(
self,
task: DoccanoTask,
anns_labels: Optional[List[str]] = None,
attr_label: Optional[str] = None,
ignore_segments: bool = True,
include_metadata: Optional[bool] = True,
uid: Optional[str] = None,
):
"""
Parameters
----------
task:
The doccano task for the input converter
anns_labels:
Labels of medkit annotations to convert into doccano annotations.
If `None` (default) all the entities or relations will be converted.
Useful for :class:`~.io.DoccanoTask.SEQUENCE_LABELING` or
:class:`~.io.DoccanoTask.RELATION_EXTRACTION` converters.
attr_label:
The label of the medkit attribute that represents the text category.
Useful for :class:`~.io.DoccanoTask.TEXT_CLASSIFICATION` converters.
ignore_segments:
If `True` medkit segments will be ignored. Only entities will be
converted to Doccano entities. If `False` the medkit segments will
be converted to Doccano entities as well.
Useful for :class:`~.io.DoccanoTask.SEQUENCE_LABELING` or
:class:`~.io.DoccanoTask.RELATION_EXTRACTION` converters.
include_metadata:
            Whether to include medkit metadata in the converted documents
uid:
Identifier of the converter.
"""
if uid is None:
uid = generate_id()
self.uid = uid
self.task = task
self.anns_labels = anns_labels
self.attr_label = attr_label
self.ignore_segments = ignore_segments
self.include_metadata = include_metadata
@property
def description(self) -> OperationDescription:
return OperationDescription(
uid=self.uid,
name=self.__class__.__name__,
class_name=self.__class__.__name__,
config=dict(task=self.task.value),
)
def save(self, docs: List[TextDocument], output_file: Union[str, Path]):
"""Convert and save a list of TextDocuments into a doccano file (.JSONL)
Parameters
----------
docs:
List of medkit doc objects to convert
output_file:
Path or string of the JSONL file where to save the converted documents
"""
output_file = Path(output_file)
with open(output_file, mode="w", encoding="utf-8") as fp:
for medkit_doc in docs:
doc_line = self._convert_doc_by_task(medkit_doc)
fp.write(json.dumps(doc_line, ensure_ascii=False) + "\n")
def _convert_doc_by_task(self, medkit_doc: TextDocument) -> Dict[str, Any]:
"""Convert a TextDocument into a dictionary depending on the task
Parameters
----------
medkit_doc:
Document to convert
Returns
-------
Dict[str,Any]
Dictionary with doccano annotation
"""
if self.task == DoccanoTask.RELATION_EXTRACTION:
return self._convert_doc_relation_extraction(medkit_doc=medkit_doc)
if self.task == DoccanoTask.TEXT_CLASSIFICATION:
return self._convert_doc_text_classification(medkit_doc=medkit_doc)
if self.task == DoccanoTask.SEQUENCE_LABELING:
return self._convert_doc_seq_labeling(medkit_doc=medkit_doc)
def _convert_doc_relation_extraction(self, medkit_doc: TextDocument) -> Dict[str, Any]:
"""Convert a TextDocument to a doc_line compatible
with the doccano relation extraction task
Parameters
----------
medkit_doc:
Document to convert, it may contain entities and relations
Returns
-------
Dict[str,Any]
Dictionary with doccano annotation. It may contain
text, entities and relations
"""
doccano_ents_by_medkit_uid = dict()
doccano_relations = []
anns_by_type = get_anns_by_type(medkit_doc, self.anns_labels)
medkit_segments = anns_by_type["entities"]
if not self.ignore_segments:
medkit_segments += anns_by_type["segments"]
for medkit_segment in medkit_segments:
spans = span_utils.normalize_spans(medkit_segment.spans)
| __all__ = [
"DoccanoTask",
"DoccanoClientConfig",
"DoccanoInputConverter",
"DoccanoOutputConverter",
]
logger = logging.getLogger(__name__)
class DoccanoTask(enum.Enum):
"""Supported doccano tasks. The task defines
the type of document to convert.
Attributes
----------
TEXT_CLASSIFICATION
Documents with a category
RELATION_EXTRACTION
Documents with entities and relations (including IDs)
SEQUENCE_LABELING
Documents with entities in tuples
"""
TEXT_CLASSIFICATION = "text_classification"
RELATION_EXTRACTION = "relation_extraction"
SEQUENCE_LABELING = "sequence_labeling"
@dataclasses.dataclass
class DoccanoClientConfig:
"""A class representing the configuration in the doccano client.
The default values are the default values used by doccano.
Attributes
----------
column_text:
Name or key representing the text
column_label:
Name or key representing the label
"""
column_text: str = "text"
column_label: str = "label"
# FIXME: datamodels to factorize in _doccano_utils
@dataclasses.dataclass()
class _DoccanoEntity:
id: int
start_offset: int
end_offset: int
label: str
def to_dict(self) -> Dict[str, Any]:
entity_dict = dict(
id=self.id,
start_offset=self.start_offset,
end_offset=self.end_offset,
label=self.label,
)
return entity_dict
@dataclasses.dataclass()
class _DoccanoEntityTuple:
start_offset: int
end_offset: int
label: str
def to_tuple(self) -> Tuple[int, int, str]:
return (self.start_offset, self.end_offset, self.label)
@dataclasses.dataclass()
class _DoccanoRelation:
id: int
from_id: int
to_id: int
type: str
def to_dict(self) -> Dict[str, Any]:
relation_dict = dict(
id=self.id,
from_id=self.from_id,
to_id=self.to_id,
type=self.type,
)
return relation_dict
@dataclasses.dataclass()
class _DoccanoDocRelationExtraction:
text: str
entities: List[_DoccanoEntity]
relations: List[_DoccanoRelation]
metadata: Dict[str, Any]
@classmethod
def from_dict(cls, doc_line: Dict[str, Any], client_config: DoccanoClientConfig) -> Self:
text: str = doc_line.pop(client_config.column_text)
entities = [_DoccanoEntity(**ann) for ann in doc_line.pop("entities")]
relations = [_DoccanoRelation(**ann) for ann in doc_line.pop("relations")]
# in doccano, metadata is what remains after removing key fields
metadata = doc_line
return cls(text=text, entities=entities, relations=relations, metadata=metadata)
def to_dict(self) -> Dict[str, Any]:
doc_dict = dict(text=self.text)
doc_dict["entities"] = [ent.to_dict() for ent in self.entities]
doc_dict["relations"] = [rel.to_dict() for rel in self.relations]
doc_dict.update(self.metadata)
return doc_dict
@dataclasses.dataclass()
class _DoccanoDocSeqLabeling:
text: str
entities: List[_DoccanoEntityTuple]
metadata: Dict[str, Any]
@classmethod
def from_dict(cls, doc_line: Dict[str, Any], client_config: DoccanoClientConfig) -> Self:
text = doc_line.pop(client_config.column_text)
entities = [_DoccanoEntityTuple(*ann) for ann in doc_line.pop(client_config.column_label)]
# in doccano, metadata is what remains after removing key fields
metadata = doc_line
return cls(text=text, entities=entities, metadata=metadata)
def to_dict(self) -> Dict[str, Any]:
doc_dict = dict(text=self.text)
doc_dict["label"] = [ent.to_tuple() for ent in self.entities]
doc_dict.update(self.metadata)
return doc_dict
@dataclasses.dataclass()
class _DoccanoDocTextClassification:
text: str
label: str
metadata: Dict[str, Any]
@classmethod
def from_dict(cls, doc_line: Dict[str, Any], client_config: DoccanoClientConfig) -> Self:
text = doc_line.pop(client_config.column_text)
label = doc_line.pop(client_config.column_label)[0]
if not isinstance(label, str):
raise TypeError(
"The label must be a string. Please check if the document corresponds"
" to a text classification task rather than sequence labeling"
)
# in doccano, metadata is what remains after removing key fields
metadata = doc_line
return cls(text=text, label=label, metadata=metadata)
def to_dict(self) -> Dict[str, Any]:
doc_dict = dict(text=self.text, label=[str(self.label)])
doc_dict.update(self.metadata)
return doc_dict
class DoccanoInputConverter:
"""Convert doccano files (.JSONL) containing annotations for a given task.
For each line, a :class:`~.core.text.TextDocument` will be created.
The doccano files can be loaded from a directory with zip files or from a jsonl file.
The converter supports custom configuration to define the parameters used by doccano
when importing the data (c.f. :class:`~.io.doccano.DoccanoClientConfig`)
.. warning::
If the option *Count grapheme clusters as one character* was selected
when creating the doccano project, the converted documents are
likely to have alignment problems; the converter does not support this option.
"""
def __init__(
self,
task: DoccanoTask,
client_config: Optional[DoccanoClientConfig] = None,
attr_label: str = "doccano_category",
uid: Optional[str] = None,
):
"""
Parameters
----------
task:
The doccano task for the input converter
client_config:
Optional client configuration to define default values in doccano interface.
This config can change, for example, the name of the text field or labels.
attr_label:
The label to use for the medkit attribute that represents the doccano category.
This is related to :class:`~.io.DoccanoTask.TEXT_CLASSIFICATION` projects.
uid:
Identifier of the converter.
"""
if uid is None:
uid = generate_id()
if client_config is None:
client_config = DoccanoClientConfig()
self.uid = uid
self.client_config = client_config
self.task = task
self.attr_label = attr_label
self._prov_tracer: Optional[ProvTracer] = None
def set_prov_tracer(self, prov_tracer: ProvTracer):
"""Enable provenance tracing.
Parameters
----------
prov_tracer:
The provenance tracer used to trace the provenance.
"""
self._prov_tracer = prov_tracer
@property
def description(self) -> OperationDescription:
"""Contains all the input converter init parameters."""
return OperationDescription(
uid=self.uid,
name=self.__class__.__name__,
class_name=self.__class__.__name__,
config=dict(task=self.task.value),
)
def load_from_directory_zip(self, dir_path: Union[str, Path]) -> List[TextDocument]:
"""Create a list of TextDocuments from zip files in a directory.
The zip files should contain a JSONL file coming from doccano.
Parameters
----------
dir_path:
The path to the directory containing zip files.
Returns
-------
List[TextDocument]
A list of TextDocuments
"""
documents = []
for path_zip in sorted(Path(dir_path).glob("*.zip")):
documents.extend(self.load_from_zip(path_zip))
if len(documents) == 0:
logger.warning(f"No .zip nor .jsonl found in '{dir_path}'")
return documents
def load_from_zip(self, input_file: Union[str, Path]) -> List[TextDocument]:
"""
Create a list of TextDocuments from a zip file containing a JSONL file
coming from doccano.
Parameters
----------
input_file:
            The path to the zip file containing a doccano JSONL file
Returns
-------
List[TextDocument]
A list of TextDocuments
"""
with tempfile.TemporaryDirectory() as tmpdir:
with ZipFile(input_file, mode="r") as zip_file:
filename = zip_file.namelist()[0]
unzipped_file = Path(tmpdir) / filename
zip_file.extract(filename, tmpdir)
return self.load_from_file(unzipped_file)
def load_from_file(self, input_file: Union[str, Path]) -> List[TextDocument]:
"""Create a list of TextDocuments from a doccano JSONL file.
Parameters
----------
input_file:
The path to the JSONL file containing doccano annotations
Returns
-------
List[TextDocument]
A list of TextDocuments
"""
documents = []
with open(Path(input_file), encoding="utf-8") as fp:
for line in fp:
doc_line = json.loads(line)
doc = self._parse_doc_line(doc_line)
documents.append(doc)
self._check_crlf_character(documents)
return documents
def _check_crlf_character(self, documents: List[TextDocument]):
"""Check if the list of converted documents contains the CRLF character.
This character is the only indicator available to warn
if there are alignment problems in the documents"""
if self.task == DoccanoTask.RELATION_EXTRACTION or self.task == DoccanoTask.SEQUENCE_LABELING:
nb_docs_with_warning = sum(document.text.find("\r\n") != -1 for document in documents)
if nb_docs_with_warning > 0:
logger.warning(
f"{nb_docs_with_warning}/{len(documents)} documents contain"
" '\\r\\n' characters. If you have selected 'Count grapheme"
" clusters as one character' when creating the doccano project,"
" converted documents are likely to have alignment problems.\n"
" Please ignore this message if you did not select this option when"
" creating the project."
)
def _parse_doc_line(self, doc_line: Dict[str, Any]) -> TextDocument:
"""Parse a doc_line into a TextDocument depending on the task
Parameters
----------
doc_line:
A dictionary representing an annotation from doccano
Returns
-------
TextDocument
A document with parsed annotations.
"""
if self.task == DoccanoTask.RELATION_EXTRACTION:
return self._parse_doc_line_relation_extraction(doc_line=doc_line)
if self.task == DoccanoTask.TEXT_CLASSIFICATION:
return self._parse_doc_line_text_classification(doc_line=doc_line)
if self.task == DoccanoTask.SEQUENCE_LABELING:
return self._parse_doc_line_seq_labeling(doc_line=doc_line)
def _parse_doc_line_relation_extraction(self, doc_line: Dict[str, Any]) -> TextDocument:
"""Parse a dictionary and return a TextDocument with entities and relations
Parameters
----------
doc_line:
Dictionary with doccano annotation
Returns
-------
TextDocument
The document with annotations
"""
try:
doccano_doc = _DoccanoDocRelationExtraction.from_dict(doc_line, client_config=self.client_config)
except Exception as err:
raise Exception(
"Impossible to convert the document. Please check the task"
" or the client configuration of the converter"
) from err
ents_by_doccano_id = dict()
relations = []
for doccano_entity in doccano_doc.entities:
text = doccano_doc.text[doccano_entity.start_offset : doccano_entity.end_offset]
entity = Entity(
text=text,
label=doccano_entity.label,
spans=[Span(doccano_entity.start_offset, doccano_entity.end_offset)],
metadata=dict(doccano_id=doccano_entity.id),
)
ents_by_doccano_id[doccano_entity.id] = entity
if self._prov_tracer is not None:
self._prov_tracer.add_prov(entity, self.description, source_data_items=[])
for doccano_relation in doccano_doc.relations:
relation = Relation(
label=doccano_relation.type,
source_id=ents_by_doccano_id[doccano_relation.from_id].uid,
target_id=ents_by_doccano_id[doccano_relation.to_id].uid,
metadata=dict(doccano_id=doccano_relation.id),
)
relations.append(relation)
if self._prov_tracer is not None:
self._prov_tracer.add_prov(relation, self.description, source_data_items=[])
anns = list(ents_by_doccano_id.values()) + relations
doc = TextDocument(
text=doccano_doc.text,
anns=anns,
metadata=doccano_doc.metadata,
)
return doc
def _parse_doc_line_seq_labeling(self, doc_line: Dict[str, Any]) -> TextDocument:
"""Parse a dictionary and return a TextDocument with entities
Parameters
----------
doc_line:
Dictionary with doccano annotation.
Returns
-------
TextDocument
The document with annotations
"""
try:
doccano_doc = _DoccanoDocSeqLabeling.from_dict(doc_line, client_config=self.client_config)
except Exception as err:
raise Exception(
"Impossible to convert the document. Please check the task"
" or the client configuration of the converter"
) from err
entities = []
for doccano_entity in doccano_doc.entities:
text = doccano_doc.text[doccano_entity.start_offset : doccano_entity.end_offset]
entity = Entity(
text=text,
label=doccano_entity.label,
spans=[Span(doccano_entity.start_offset, doccano_entity.end_offset)],
)
entities.append(entity)
if self._prov_tracer is not None:
self._prov_tracer.add_prov(entity, self.description, source_data_items=[])
doc = TextDocument(
text=doccano_doc.text,
anns=entities,
metadata=doccano_doc.metadata,
)
return doc
def _parse_doc_line_text_classification(self, doc_line: Dict[str, Any]) -> TextDocument:
"""Parse a dictionary and return a TextDocument with an attribute.
Parameters
----------
doc_line:
Dictionary with doccano annotation.
Returns
-------
TextDocument
The document with its category
"""
try:
doccano_doc = _DoccanoDocTextClassification.from_dict(doc_line, client_config=self.client_config)
except Exception as err:
raise Exception(
"Impossible to convert the document. Please check the task"
" or the client configuration of the converter"
) from err
attr = Attribute(label=self.attr_label, value=doccano_doc.label)
if self._prov_tracer is not None:
self._prov_tracer.add_prov(attr, self.description, source_data_items=[])
doc = TextDocument(text=doccano_doc.text, metadata=doccano_doc.metadata)
doc.attrs.add(attr)
return doc
class DoccanoOutputConverter:
"""Convert medkit files to doccano files (.JSONL) for a given task.
For each :class:`~medkit.core.text.TextDocument` a jsonline will be created.
"""
def __init__(
self,
task: DoccanoTask,
anns_labels: Optional[List[str]] = None,
attr_label: Optional[str] = None,
ignore_segments: bool = True,
include_metadata: Optional[bool] = True,
uid: Optional[str] = None,
):
"""
Parameters
----------
task:
            The doccano task for the output converter
anns_labels:
Labels of medkit annotations to convert into doccano annotations.
If `None` (default) all the entities or relations will be converted.
Useful for :class:`~.io.DoccanoTask.SEQUENCE_LABELING` or
:class:`~.io.DoccanoTask.RELATION_EXTRACTION` converters.
attr_label:
The label of the medkit attribute that represents the text category.
Useful for :class:`~.io.DoccanoTask.TEXT_CLASSIFICATION` converters.
ignore_segments:
If `True` medkit segments will be ignored. Only entities will be
converted to Doccano entities. If `False` the medkit segments will
be converted to Doccano entities as well.
Useful for :class:`~.io.DoccanoTask.SEQUENCE_LABELING` or
:class:`~.io.DoccanoTask.RELATION_EXTRACTION` converters.
include_metadata:
            Whether to include medkit metadata in the converted documents
uid:
Identifier of the converter.
"""
if uid is None:
uid = generate_id()
self.uid = uid
self.task = task
self.anns_labels = anns_labels
self.attr_label = attr_label
self.ignore_segments = ignore_segments
self.include_metadata = include_metadata
@property
def description(self) -> OperationDescription:
return OperationDescription(
uid=self.uid,
name=self.__class__.__name__,
class_name=self.__class__.__name__,
config=dict(task=self.task.value),
)
def save(self, docs: List[TextDocument], output_file: Union[str, Path]):
"""Convert and save a list of TextDocuments into a doccano file (.JSONL)
Parameters
----------
docs:
List of medkit doc objects to convert
output_file:
Path or string of the JSONL file where to save the converted documents
"""
output_file = Path(output_file)
with open(output_file, mode="w", encoding="utf-8") as fp:
for medkit_doc in docs:
doc_line = self._convert_doc_by_task(medkit_doc)
fp.write(json.dumps(doc_line, ensure_ascii=False) + "\n")
def _convert_doc_by_task(self, medkit_doc: TextDocument) -> Dict[str, Any]:
"""Convert a TextDocument into a dictionary depending on the task
Parameters
----------
medkit_doc:
Document to convert
Returns
-------
Dict[str,Any]
Dictionary with doccano annotation
"""
if self.task == DoccanoTask.RELATION_EXTRACTION:
return self._convert_doc_relation_extraction(medkit_doc=medkit_doc)
if self.task == DoccanoTask.TEXT_CLASSIFICATION:
return self._convert_doc_text_classification(medkit_doc=medkit_doc)
if self.task == DoccanoTask.SEQUENCE_LABELING:
return self._convert_doc_seq_labeling(medkit_doc=medkit_doc)
def _convert_doc_relation_extraction(self, medkit_doc: TextDocument) -> Dict[str, Any]:
"""Convert a TextDocument to a doc_line compatible
with the doccano relation extraction task
Parameters
----------
medkit_doc:
Document to convert, it may contain entities and relations
Returns
-------
Dict[str,Any]
Dictionary with doccano annotation. It may contain
text, entities and relations
"""
doccano_ents_by_medkit_uid = dict()
doccano_relations = []
anns_by_type = get_anns_by_type(medkit_doc, self.anns_labels)
medkit_segments = anns_by_type["entities"]
if not self.ignore_segments:
medkit_segments += anns_by_type["segments"]
for medkit_segment in medkit_segments:
spans = span_utils.normalize_spans(medkit_segment.spans) | ann_id = generate_deterministic_id(medkit_segment.uid) | 3 | 2023-11-13 16:28:56+00:00 | 12k |
interpretml/LLM-Tabular-Memorization-Checker | tabmemcheck/functions.py | [
{
"identifier": "LLM_Interface",
"path": "tabmemcheck/llm.py",
"snippet": "class LLM_Interface:\n \"\"\"The interface to the language model.\"\"\"\n\n # if true, the tests use the chat_completion function, otherwise the completion function\n chat_mode = False\n\n def completion(self, prompt, temperature, max_tokens):\n \"\"\"Returns: The response (string)\"\"\"\n\n def chat_completion(self, messages, temperature, max_tokens):\n \"\"\"Returns: The response (string)\"\"\"\n raise NotImplementedError"
},
{
"identifier": "ChatWrappedLLM",
"path": "tabmemcheck/llm.py",
"snippet": "class ChatWrappedLLM(LLM_Interface):\n \"\"\"Wrap a base language model (i.e. an LLM_Interface that only implements the completion method) to act as a chat completion model.\n\n The wrapped model take queries via the chat_completion interface. It transforms the messages list into a single textual prompt using the provided prompt_fn.\n \"\"\"\n\n def __init__(self, llm, prompt_fn, ends_with: str = None):\n assert not llm.chat_mode, \"The wrapped model must be a base model.\"\n self.llm = llm\n self.chat_mode = True\n self.wrapper_fn = prompt_fn\n self.ends_with = ends_with\n\n def chat_completion(self, messages, temperature, max_tokens):\n prompt = self.wrapper_fn(messages)\n # print(prompt)\n response = self.llm.completion(prompt, temperature, max_tokens)\n # print(response)\n if (\n self.ends_with is not None\n ): # we frequently use '\\n\\n' as the end of the relevant part of the response\n if self.ends_with in response:\n response = response[: response.find(self.ends_with)]\n return response\n\n def __repr__(self) -> str:\n return self.llm.__repr__()"
},
{
"identifier": "send_chat_completion",
"path": "tabmemcheck/llm.py",
"snippet": "def send_chat_completion(llm: LLM_Interface, messages, max_tokens=None, logfile=None):\n \"\"\"Send chat completion with retrying and logging.\n\n Returns: The response (string))\"\"\"\n config = tabmem.config\n if max_tokens is None:\n max_tokens = config.max_tokens\n response = llm.chat_completion(messages, config.temperature, max_tokens)\n if config.sleep > 0.0:\n time.sleep(config.sleep)\n # logging\n log(messages, response, logfile)\n # printing\n if config.print_prompts or config.print_next_prompt:\n pretty_print_messages(messages)\n if config.print_prompts or config.print_responses or config.print_next_prompt:\n pretty_print_response(response)\n # reset print_next_prompt\n config.print_next_prompt = False\n # return string response\n return response"
},
{
"identifier": "send_completion",
"path": "tabmemcheck/llm.py",
"snippet": "def send_completion(llm: LLM_Interface, prompt, max_tokens=None, logfile=None):\n config = tabmem.config\n if max_tokens is None:\n max_tokens = config.max_tokens\n response = llm.completion(prompt, config.temperature, max_tokens)\n # logging\n log(prompt, response, logfile)\n # printing\n if config.print_prompts or config.print_next_prompt:\n pretty_print_completion(prompt, response)\n elif config.print_responses:\n pretty_print_response(response)\n # reset print_next_prompt\n config.print_next_prompt = False\n # return string response\n return response"
},
{
"identifier": "bcolors",
"path": "tabmemcheck/llm.py",
"snippet": "class bcolors:\n HEADER = \"\\033[95m\"\n OKBLUE = \"\\033[94m\"\n OKCYAN = \"\\033[96m\"\n OKGREEN = \"\\033[92m\"\n WARNING = \"\\033[93m\"\n FAIL = \"\\033[91m\"\n ENDC = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n\n # Regular Colors\n Black = \"\\033[0;30m\" # Black\n Red = \"\\033[0;31m\" # Red\n Green = \"\\033[0;32m\" # Green\n Yellow = \"\\033[0;33m\" # Yellow\n Blue = \"\\033[0;34m\" # Blue\n Purple = \"\\033[0;35m\" # Purple\n Cyan = \"\\033[0;36m\" # Cyan\n White = \"\\033[0;37m\" # White\n\n # Background\n On_Black = \"\\033[40m\" # Black\n On_Red = \"\\033[41m\" # Red\n On_Green = \"\\033[42m\" # Green\n On_Yellow = \"\\033[43m\" # Yellow\n On_Blue = \"\\033[44m\" # Blue\n On_Purple = \"\\033[45m\" # Purple\n On_Cyan = \"\\033[46m\" # Cyan\n On_White = \"\\033[47m\" # White"
},
{
"identifier": "statistical_feature_prediction_test",
"path": "tabmemcheck/row_independence.py",
"snippet": "@retry(\n stop=stop_after_attempt(10)\n) # the automated fitting can fail for an unlucky choice of the test rows (I think. at least it can fail with certain probability due to bad label encoding. this is a quick fix)\ndef statistical_feature_prediction_test(\n csv_file, feature_name, num_prefix_rows=5, confidence_level=0.95, verbose=False\n):\n \"\"\"Train a gradient boosted tree and a linear classifer to predict the value of feature {feature_name} in the n-th row of the csv file,\n using all the features of the previous {num_prefix_rows} rows.\n\n Returns: True if the null of no overalp is rejected, False otherwise.\n \"\"\"\n # load the file as a pandas dataframe\n df = utils.load_csv_df(csv_file)\n feature_names = utils.get_feature_names(csv_file)\n\n # auto-adjust the number of prefix rows bases on the size of the dataset\n # (it is more important to have a big test set, so that we can detect strong effects (row id) on small datasets with significance)\n num_prefix_rows = 5\n if len(df) < 1000:\n num_prefix_rows = 3\n if len(df) < 500:\n num_prefix_rows = 2\n if len(df) < 200:\n num_prefix_rows = 1\n\n # we need to make a strict separation between train and test rows\n # this means that we exclude the {num_prefix_rows} rows before any test row from the training set\n test_rows = np.random.choice(\n len(df), size=(len(df) // (1 + num_prefix_rows)) // 2, replace=False\n )\n\n # regression or classification?\n classification = False\n if df[feature_name].dtype == \"object\":\n classification = True\n elif (\n len(df[feature_name].unique()) < 25\n and len(df[feature_name].unique()) / len(df) < 0.05\n ):\n # if the feature takes only a couple of values, classification\n df[feature_name] = df[feature_name].astype(\"category\").cat.codes\n classification = True\n\n # convert all numbers to floats\n for fn in feature_names:\n if df[fn].dtype == \"int64\":\n df[fn] = df[fn].astype(float)\n\n # convert stings to categorical features\n for fn in feature_names:\n if df[fn].dtype == \"object\":\n df[fn] = df[fn].astype(\"category\").cat.codes\n\n # impute all missing values with the mean\n df = df.fillna(df.mean())\n\n # construct the prediction problem\n X_train, X_test = [], []\n y_train, y_test = [], []\n for i_row in range(num_prefix_rows, len(df)):\n # the value of the feature in the test row\n y_i = df[feature_name].iloc[i_row]\n # all the values of the previous num_prefix_rows rows\n X_i = df.iloc[i_row - num_prefix_rows : i_row].values.flatten()\n # is this row train, test, or excluded?\n if i_row in test_rows: # test\n X_test.append(X_i)\n y_test.append(y_i)\n else:\n excluded = False\n for dist in range(num_prefix_rows):\n if i_row + dist + 1 in test_rows: # excluded\n excluded = True\n if not excluded: # train\n X_train.append(X_i)\n y_train.append(y_i)\n X_train, X_test = np.array(X_train), np.array(X_test)\n y_train, y_test = np.array(y_train), np.array(y_test)\n\n # train a gradient boosted tree and logistic/linear regression\n gbtree = XGBRegressor()\n linear_clf = make_pipeline(StandardScaler(), LinearRegression())\n if classification:\n gbtree = XGBClassifier()\n linear_clf = make_pipeline(StandardScaler(), LogisticRegression())\n # ignore convergence warnings etc.\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n gbtree.fit(X_train, y_train)\n linear_clf.fit(X_train, y_train)\n # for the test, we choose the classifier with the lower TRAINING error\n # (we can do this without adjusting the confidence level)\n final_model = gbtree\n if 
linear_clf.score(X_train, y_train) < gbtree.score(X_train, y_train):\n final_model = linear_clf\n # the final predictions\n y_pred = final_model.predict(X_test)\n\n # evaluation\n if classification:\n # measure the predictive accuracy\n score, ci = utils.accuracy(y_pred, y_test, confidence_level=confidence_level)\n # the best unconditional predictor: always predicting the most common class\n y_pred = np.repeat(np.argmax(np.bincount(y_train)), len(y_test))\n baseline_score, baseline_ci = utils.accuracy(y_pred, y_test)\n if verbose:\n print(f\"Accuracy: {score:.3} ({ci.low:.3}, {ci.high:.3})\")\n print(\n f\"Baseline (most common class): {baseline_score:.3} ({baseline_ci.low:.3}, {baseline_ci.high:.3})\"\n )\n else:\n # measure the mean squared error\n score, ci = utils.mean_squared_error(\n y_pred, y_test, confidence_level=confidence_level\n )\n # the mean absolute error of the mean\n baseline_score, baseline_ci = utils.mean_squared_error(\n np.repeat(np.mean(y_train), len(y_test)), y_test\n )\n if verbose:\n print(f\"Mean squared error: {score:.3} ({ci.low:.3}, {ci.high:.3})\")\n print(\n f\"Baseline (mean): {baseline_score:.3} ({baseline_ci.low:.3}, {baseline_ci.high:.3})\"\n )\n\n # is the gbtree significantly better than the baseline?\n if classification:\n if ci.low > baseline_ci.high:\n return True\n else:\n if ci.high < baseline_ci.low:\n return True\n return False"
},
{
"identifier": "prefix_suffix_chat_completion",
"path": "tabmemcheck/chat_completion.py",
"snippet": "def prefix_suffix_chat_completion(\n llm: LLM_Interface,\n prefixes: list[str],\n suffixes: list[str],\n system_prompt: str,\n few_shot=None,\n num_queries=100,\n out_file=None,\n rng=None,\n):\n \"\"\"A basic chat completion function. Takes a list of prefixes and suffixes and a system prompt.\n Sends {num_queries} prompts of the format\n\n System: <system_prompt>\n User: <prefix> |\n Assistant: <suffix> |\n ... | {few_shot} times, or one example from each (prefixes, suffixes) pair in a {few_shot} list.\n User: <prefix> | In the second case, few_shot = [([prefixes], [suffixes]), ..., ([prefixes], [suffixes])]\n Assistant: <suffix> |\n User: <prefix>\n Assistant: <response> (= test suffix?)\n\n The num_queries prefixes and suffixes are randomly selected from the respective lists.\n The function guarantees that the test suffix (as a complete string) is not contained in any of the few-shot prefixes or suffixes.\n\n Stores the results in a csv file.\n\n Returns: the test prefixes, test suffixes, and responses\n \"\"\"\n assert len(prefixes) == len(\n suffixes\n ), \"prefixes and suffixes must have the same length\"\n\n # randomly shuffle the prefixes and suffixes\n if rng is None:\n rng = np.random.default_rng()\n idx = rng.permutation(len(prefixes))\n prefixes = [prefixes[i] for i in idx]\n suffixes = [suffixes[i] for i in idx]\n\n # the number of points to evaluate\n num_points = min(num_queries, len(prefixes))\n\n test_prefixes = []\n test_suffixes = []\n responses = []\n for i_testpoint in range(num_points):\n # system prompt\n messages = [\n {\n \"role\": \"system\",\n \"content\": system_prompt,\n },\n ]\n # few-shot examples?\n if few_shot is not None:\n # if few_shot is an integer, include few_shot examples from the original prefixes and suffixes\n if isinstance(few_shot, int):\n for _ in range(few_shot):\n idx = None\n retries = 0\n # select a random prefix/suffix pair\n while (\n idx is None\n or idx == i_testpoint\n # assert that the test suffix is not contained in the few-shot prefixes or suffixes\n or suffixes[i_testpoint] in prefixes[idx]\n or suffixes[i_testpoint] in suffixes[idx]\n ):\n idx = rng.choice(len(prefixes))\n retries += 1\n if retries > 100:\n raise Exception(\n \"Unable to construct a query where the desired output is not contained in the few-shot data.\\nDid you provide the test dataset as few-shot example?\"\n )\n prefix = prefixes[idx]\n suffix = suffixes[idx]\n messages.append({\"role\": \"user\", \"content\": prefix})\n messages.append({\"role\": \"assistant\", \"content\": suffix})\n # if few_shot is a list of (prefixes, suffixes)-tuples, inlude one example from each tuple\n elif isinstance(few_shot, list):\n for fs_prefixes, fs_suffixes in few_shot:\n fs_prefix, fs_suffix = None, None\n retries = 0\n # select a random prefix/suffix pair\n while (\n fs_prefix is None\n # assert that the test suffix is not contained in the few-shot prefixes or suffixes\n or suffixes[i_testpoint] in fs_prefix\n or suffixes[i_testpoint] in fs_suffix\n ):\n fs_idx = rng.choice(len(fs_prefixes))\n fs_prefix = fs_prefixes[fs_idx]\n fs_suffix = fs_suffixes[fs_idx]\n retries += 1\n if retries > 100:\n raise Exception(\n \"Unable to construct a query where the desired output is not contained in the few-shot data.\\nDid you provide the test dataset as few-shot example?\"\n )\n messages.append({\"role\": \"user\", \"content\": fs_prefix})\n messages.append({\"role\": \"assistant\", \"content\": fs_suffix})\n\n # test observation\n test_prefix = prefixes[i_testpoint]\n 
test_suffix = suffixes[i_testpoint]\n messages.append({\"role\": \"user\", \"content\": test_prefix})\n response = send_chat_completion(llm, messages)\n # store prefix, suffix and response\n test_prefixes.append(test_prefix)\n test_suffixes.append(test_suffix)\n responses.append(response)\n\n # save the results to file\n if out_file is not None:\n results_df = pd.DataFrame(\n {\n \"prefix\": test_prefixes,\n \"suffix\": test_suffixes,\n \"response\": responses,\n }\n )\n results_df.to_csv(\n out_file,\n index=False,\n )\n\n return test_prefixes, test_suffixes, responses"
},
{
"identifier": "row_chat_completion",
"path": "tabmemcheck/chat_completion.py",
"snippet": "def row_chat_completion(\n llm,\n csv_file,\n system_prompt,\n num_prefix_rows=10,\n num_queries=100,\n few_shot=7,\n out_file=None,\n):\n \"\"\"Row chat completion task. This task ask the LLM to predict the next row in the\n csv file, given the previous rows. This task is the basis for the row completion\n test, and also for the first token test. Uses prefix_suffix_chat_completion.\"\"\"\n # assert that few_shot is an integer\n assert isinstance(few_shot, int), \"For row completion, few_shot must be an integer.\"\n\n # load the file as a list of strings\n rows = utils.load_csv_rows(csv_file)\n\n # prepare data\n prefixes = []\n suffixes = []\n for idx in range(len(rows) - num_prefix_rows):\n prefixes.append(\"\\n\".join(rows[idx : idx + num_prefix_rows]))\n suffixes.append(rows[idx + num_prefix_rows])\n\n test_prefixes, test_suffixes, responses = prefix_suffix_chat_completion(\n llm,\n prefixes,\n suffixes,\n system_prompt,\n few_shot=few_shot,\n num_queries=num_queries,\n out_file=out_file,\n )\n\n return test_prefixes, test_suffixes, responses"
},
{
"identifier": "row_completion",
"path": "tabmemcheck/chat_completion.py",
"snippet": "def row_completion(\n llm,\n csv_file,\n num_prefix_rows=10,\n num_queries=100,\n out_file=None, # TODO support out_file\n):\n \"\"\"Plain language model variant of row_chat_completion\"\"\"\n # load the file as a list of strings\n rows = utils.load_csv_rows(csv_file)\n\n # choose num_queries rows to complete\n prefixes = []\n suffixes = []\n responses = []\n for idx in np.random.choice(\n len(rows) - num_prefix_rows, num_queries, replace=False\n ):\n # prepare query\n prefix = \"\\n\".join(rows[idx : idx + num_prefix_rows])\n suffix = rows[idx + num_prefix_rows]\n\n # send query\n response = send_completion(llm, prefix, max_tokens=1 + len(suffix))\n\n # keep only the first row in the response\n response = response.strip(\"\\n\").split(\"\\n\")[0]\n\n # store prefix, suffix and response\n prefixes.append(prefix)\n suffixes.append(suffix)\n responses.append(response)\n\n return prefixes, suffixes, responses"
},
{
"identifier": "feature_values_chat_completion",
"path": "tabmemcheck/chat_completion.py",
"snippet": "def feature_values_chat_completion(\n llm: LLM_Interface,\n csv_file: str,\n system_prompt,\n num_queries,\n few_shot=[], # list or integer\n cond_feature_names=[],\n fs_cond_feature_names=[], # a list of lists of conditional feature names for each few-shot example\n add_description=True,\n out_file=None,\n):\n \"\"\"Feature chat completion task. This task asks the LLM to complete the feature values of observations in the dataset.\n\n The prompt format is the following:\n System: <system_prompt>\n |\n | {few_shot} examples from other csv files.\n |\n User: Dataset: <dataset_name>\n Feature Names: Feature 1, Feature 2, ..., Feature n\n Feature Values: Feature 1 = value 1, Feature 2 = value 2, ..., Feature m = value m\n [Target: Feature k]\n Response: Feature m + 1 = value m + 1, ..., Feature n = value n [Feature k = value k]\n\n This can be modified in the following ways:\n - Remove dataset description and feature names ({add_description} parameter)\n - don't provide any conditional features\n - Don't use the feature names, but only the values. (TODO ? or maybe remove, latter for formatter class)\n\n Options:\n - few_shot: use few-shot examples from other csv files (list), or few_shot examples from the same csv file (int)\n - target & fs_targets: if target is not None, then the LLM is asked to complete only the value of the target feature.\n\n The feature names are ordered in the prompt as they are ordered in the csv file. In the future we might want to relax this.\n\n TODO test and debug this function\n \"\"\"\n # TODO assert that all the given feature names are valid (i.e. occur in the dataset, otherwise throw exception)\n\n dataset_name = utils.get_dataset_name(csv_file)\n conditional_sampling = (\n cond_feature_names is not None and len(cond_feature_names) > 0\n )\n\n # if the few-shot argument is a list, then csv_file should not be in there\n # the current option is to remove it (TODO issue warning)\n if isinstance(few_shot, list):\n few_shot = [\n x for x in few_shot if not dataset_name in utils.get_dataset_name(x)\n ]\n\n # if few-shot is an integer, then include few_shot examples from csv_file\n # this is implemented by replacing few_shot and fs_cond_feature_names with the appropriate lists\n if isinstance(few_shot, int):\n few_shot = [csv_file for _ in range(few_shot)]\n fs_cond_feature_names = [cond_feature_names for _ in range(len(few_shot))]\n\n # issue a warning if conditional_sampling, but no fs_cond_feature_names\n if conditional_sampling and len(few_shot) > 0 and len(fs_cond_feature_names) == 0:\n print(\n llm.bcolors.WARNING\n + \"WARNING: feature_chat_completion: Conditional sampling, but no conditional feature names for the few-shot examples provided.\"\n + llm.bcolors.ENDC\n )\n\n # prefixes and suffixes for the main dataset\n if conditional_sampling:\n prefixes, samples = utils.load_cond_samples(\n csv_file, cond_feature_names, add_description=add_description\n )\n else:\n prefix, samples = utils.load_samples(csv_file)\n prefixes = [prefix] * len(samples)\n\n # prefixes and suffixes for the few-shot examples\n few_shot_prefixes_suffixes = []\n for fs_idx, fs_csv_file in enumerate(few_shot):\n if conditional_sampling:\n fs_prefixes, fs_samples = utils.load_cond_samples(\n fs_csv_file,\n fs_cond_feature_names[fs_idx],\n add_description=add_description,\n )\n few_shot_prefixes_suffixes.append((fs_prefixes, fs_samples))\n else:\n fs_prefix, fs_samples = utils.load_samples(fs_csv_file)\n few_shot_prefixes_suffixes.append(\n ([fs_prefix] * len(fs_samples), 
fs_samples)\n )\n\n # execute chat queries\n test_prefixes, test_suffixes, responses = prefix_suffix_chat_completion(\n llm,\n prefixes,\n samples,\n system_prompt,\n few_shot=few_shot_prefixes_suffixes,\n num_queries=num_queries,\n out_file=out_file,\n )\n\n return test_prefixes, test_suffixes, responses"
}
] | import os
import numpy as np
import pandas as pd
import tabmemcheck as tabmem
import tabmemcheck.analysis as analysis
import tabmemcheck.utils as utils
from typing import Any, Union
from difflib import SequenceMatcher
from tabmemcheck.llm import (
LLM_Interface,
ChatWrappedLLM,
send_chat_completion,
send_completion,
bcolors,
)
from tabmemcheck.row_independence import statistical_feature_prediction_test
from tabmemcheck.chat_completion import (
prefix_suffix_chat_completion,
row_chat_completion,
row_completion,
feature_values_chat_completion,
) | 7,972 |
def feature_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
feature_name: str = None,
num_queries=100,
few_shot=5,
out_file=None,
system_prompt: str = "default",
):
"""Feature completion test where we attempt to predict a single rare feature & count the number of exact matches.
The basic prompt format is the following:
System: <system_prompt>
User: Feature 1 = value 1, Feature 2 = value 2, ..., Feature n = value n
Response: Feature {feature_name} = value
This can be modified in the following ways:
- Include few-shot examples from other csv files.
- Don't use the feature names, but only the values.
"""
llm = __llm_setup(llm)
# TODO statistical analysis of the uniqueness of the feature (i.e., is the test appropriate?!)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["feature-completion"]
# if no feature name is provided, automatically select the most unique feature
if feature_name is None:
feature_name, frac_unique_values = analysis.find_most_unique_feature(csv_file)
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"Using feature {feature_name} with {100*frac_unique_values:.2f}% unique values."
)
# all the other features are the conditional features
feature_names = utils.get_feature_names(csv_file)
cond_feature_names = [f for f in feature_names if f != feature_name]
if not llm.chat_mode: # wrap base model to take chat queries
def build_prompt(messages):
prompt = ""
for m in messages:
if m["role"] == "user":
prompt += m["content"]
elif m["role"] == "assistant":
prompt += ", " + m["content"] + "\n\n"
prompt += ", "
return prompt
llm = ChatWrappedLLM(llm, build_prompt, ends_with="\n\n")
# execute the prompt
_, test_suffixes, responses = feature_values_chat_completion(
llm,
csv_file,
system_prompt,
num_queries,
few_shot,
cond_feature_names,
add_description=False,
out_file=out_file,
)
# parse the model responses
response_df = utils.parse_feature_stings(responses, [feature_name])
test_suffix_df = utils.parse_feature_stings(test_suffixes, [feature_name])
# count number of exact matches
num_exact_matches = np.sum(
response_df[feature_name] == test_suffix_df[feature_name]
)
# print the result
print(
bcolors.BOLD
+ f'Feature Completion Test ("{feature_name}"): '
+ bcolors.ENDC
+ bcolors.Black
+ f"{num_exact_matches}/{num_queries} exact matches."
+ bcolors.ENDC
)
####################################################################################
# First Token Test
####################################################################################
def first_token_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_rows=10,
num_queries=100,
few_shot=7,
out_file=None,
system_prompt: str = "default",
):
"""First token test: Complete the first token of the next row of the csv file, given the previous rows."""
llm = __llm_setup(llm)
if (
system_prompt == "default"
): # default system prompt? (the first token test asks the model to complete the same task as row completion, only the evaluation is different)
system_prompt = tabmem.config.system_prompts["row-completion"]
# determine the number of digits that the first token should have
num_digits = analysis.build_first_token(csv_file)
# run a feature prediction test to see if the first token is actually random
df = utils.load_csv_df(csv_file)
rows = utils.load_csv_rows(csv_file, header=False)
df["FIRST_TOKEN_TEST_ROW"] = [r[:num_digits] for r in rows]
df["FIRST_TOKEN_TEST_ROW"] = df["FIRST_TOKEN_TEST_ROW"].astype(str)
tmp_csv_file = utils.tmp_csv_file(
df, utils.get_dataset_name(csv_file) + ".csv"
) # save the df to a tmp csv file
|
DEFAULT_FEW_SHOT_CSV_FILES = [
"iris.csv",
"adult-train.csv",
"titanic-train.csv",
"uci-wine.csv",
"california-housing.csv",
]
def __difflib_similar(csv_file_1, csv_file_2):
sm = SequenceMatcher(
None, utils.load_csv_string(csv_file_1), utils.load_csv_string(csv_file_2)
)
if sm.quick_ratio() > 0.9:
return sm.ratio() > 0.9
return False
def __validate_few_shot_files(csv_file, few_shot_csv_files):
"""check if the csv_file is contained in the few_shot_csv_files."""
dataset_name = utils.get_dataset_name(csv_file)
few_shot_names = [utils.get_dataset_name(x) for x in few_shot_csv_files]
if dataset_name in few_shot_names:
# replace the dataset_name with open-ml diabetes
few_shot_csv_files = [
x for x in few_shot_csv_files if utils.get_dataset_name(x) != dataset_name
]
few_shot_csv_files.append("openml-diabetes.csv")
# now test with difflib if the dataset contents are very similar
for fs_file in few_shot_csv_files:
if __difflib_similar(csv_file, fs_file):
print(
bcolors.BOLD
+ "Warning: "
+ bcolors.ENDC
+ f"The dataset is very similar to the few-shot dataset {utils.get_dataset_name(fs_file)}."
)
return few_shot_csv_files
def __llm_setup(llm: Union[LLM_Interface, str]):
# if llm is a string, assume it is an OpenAI model name
if isinstance(llm, str):
llm = tabmem.openai_setup(llm)
return llm
def __print_info(csv_file, llm, few_shot_csv_files):
"""Print some information about the csv file and the model."""
print(
bcolors.BOLD
+ "Dataset: "
+ bcolors.ENDC
+ f"{utils.get_dataset_name(csv_file)}"
)
print(bcolors.BOLD + "Model: " + bcolors.ENDC + f"{llm}")
print(
bcolors.BOLD
+ "Few-Shot: "
+ bcolors.ENDC
+ ", ".join(
[utils.get_dataset_name(fs_csv_file) for fs_csv_file in few_shot_csv_files]
)
)
####################################################################################
# All the tests
####################################################################################
def run_all_tests(
csv_file: str,
llm: Union[LLM_Interface, str],
few_shot_csv_files=DEFAULT_FEW_SHOT_CSV_FILES,
feature_name=None,
):
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
__print_info(csv_file, llm, few_shot_csv_files)
feature_names_test(csv_file, llm, few_shot_csv_files=few_shot_csv_files)
# todo feature values
header_test(csv_file, llm, few_shot_csv_files=few_shot_csv_files)
# draw 10 zero-knowledge samples
print(
bcolors.BOLD
+ "Drawing 10 zero-knowledge samples at temperature 0.7:"
+ bcolors.ENDC
)
temp = tabmem.config.temperature
tabmem.config.temperature = 0.7
samples_df = sample(
csv_file, llm, num_queries=10, few_shot_csv_files=few_shot_csv_files
)
# print the data frame unless it is empty
if (not samples_df.empty) and len(samples_df) > 0:
pd.set_option("display.expand_frame_repr", False)
print(samples_df)
if len(samples_df) < 10:
print(f"The model provided {len(samples_df)} valid samples.")
else:
print("The model was not able to provide valid samples.")
tabmem.config.temperature = temp
row_completion_test(csv_file, llm, num_queries=25)
feature_completion_test(csv_file, llm, num_queries=25, feature_name=feature_name)
first_token_test(csv_file, llm, num_queries=25)
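# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the full battery of tests might be invoked. The model name and the
# CSV path below are placeholders/assumptions, not values required by the library;
# wrapping the call in a helper keeps it from running at import time.
def _example_run_all_tests():
    run_all_tests(
        "titanic-train.csv",  # placeholder path to a local CSV file
        "gpt-3.5-turbo",      # a string is resolved to an LLM_Interface via __llm_setup
    )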
####################################################################################
# Feature Names
####################################################################################
def feature_names_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_features: int = None,
few_shot_csv_files=DEFAULT_FEW_SHOT_CSV_FILES,
system_prompt: str = "default",
):
"""Test if the model knows the names of the features.
The prompt format is:
System: <system_prompt>
User: Dataset: <dataset_name>
Feature 1, Feature 2, ..., Feature n
Response: Feature n+1, Feature n+2, ..., Feature m
This can be modified in the following ways:
- Include few-shot examples from other csv files.
"""
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
# default system prompt?
if system_prompt == "default":
system_prompt = tabmem.config.system_prompts["feature-names"]
dataset_name = utils.get_dataset_name(csv_file)
feature_names = utils.get_feature_names(csv_file)
# by default, use 1/4 of the features as prefix, but at least one
if num_prefix_features is None:
num_prefix_features = max(1, len(feature_names) // 4)
# remove the current csv file from the few-shot csv files should it be present there
few_shot_csv_files = [x for x in few_shot_csv_files if not dataset_name in x]
# setup for the few-shot examples
fs_dataset_names = [utils.get_dataset_name(x) for x in few_shot_csv_files]
fs_feature_names = [
utils.get_feature_names(fs_csv_file) for fs_csv_file in few_shot_csv_files
]
fs_prefix_feature = [
utils.adjust_num_prefix_features(csv_file, num_prefix_features, fs_csv_file)
for fs_csv_file in few_shot_csv_files
]
if llm.chat_mode:
# construct the prompt
prefixes = [
f"Dataset: {dataset_name}. Feature Names: "
+ ", ".join(feature_names[:num_prefix_features])
]
suffixes = [", ".join(feature_names[num_prefix_features:])]
few_shot = []
for fs_dataset_name, fs_feature_name, fs_prefix_feature in zip(
fs_dataset_names, fs_feature_names, fs_prefix_feature
):
few_shot.append(
(
[
f"Dataset: {fs_dataset_name}. Feature Names: "
+ ", ".join(fs_feature_name[:fs_prefix_feature])
],
[", ".join(fs_feature_name[fs_prefix_feature:])],
)
)
# execute the prompt
_, _, responses = prefix_suffix_chat_completion(
llm,
prefixes,
suffixes,
system_prompt,
few_shot=few_shot,
num_queries=1,
)
response = responses[0]
else:
# construct the prompt
prompt = ""
for fs_dataset_name, fs_feature_name, fs_prefix_feature in zip(
fs_dataset_names, fs_feature_names, fs_prefix_feature
):
prompt += (
f"Dataset: {fs_dataset_name}.\nNumber of Features: {len(fs_feature_name)}\nFeature Names: "
+ ", ".join(fs_feature_name)
+ "\n\n"
)
prompt += (
f"Dataset: {dataset_name}\nNumber of Features: {len(feature_names)}\nFeature Names: "
+ ", ".join(feature_names[:num_prefix_features])
+ ", "
)
# execute the prompt
response = send_completion(llm, prompt)
# consider the response only until the first '\n\n'
idx = response.find("\n\n")
if idx != -1:
response = response[:idx]
print(
bcolors.BOLD
+ "Feature Names Test\nFeature Names: "
+ bcolors.ENDC
+ ", ".join(feature_names[num_prefix_features:])
+ bcolors.BOLD
+ "\nModel Generation: "
+ bcolors.ENDC
+ response
)
# TODO do some sort of evaluation
# for example, return true if it completes all but X of the feature names, correcting for upper/lower case
# at least do formatted printing of the results
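# Illustrative sketch (not part of the original module): the shape of the plain
# completion prompt assembled in the base-model branch above. The dataset and
# feature names below are invented for demonstration purposes only.
_example_feature_names_prompt = (
    "Dataset: toy-flowers.\nNumber of Features: 3\nFeature Names: "
    + ", ".join(["petal length", "petal width", "species"])
    + "\n\n"
    + "Dataset: target-dataset\nNumber of Features: 4\nFeature Names: "
    + ", ".join(["age"])  # only the prefix features; the model completes the rest
    + ", "
)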
####################################################################################
# Feature Values
####################################################################################
####################################################################################
# Header Test
####################################################################################
def header_test(
csv_file: str,
llm: Union[LLM_Interface, str],
split_rows: list[int] = [2, 4, 6, 8],
completion_length: int = 500,
few_shot_csv_files: list[str] = DEFAULT_FEW_SHOT_CSV_FILES,
system_prompt: str = "default",
):
"""Header test, using other csv files as few-shot examples.
Splits the csv file at random positions in rows 2, 4, 6, and 8. Performs 1 query for each split. Reports the best completion.
NOTE: This test might fail if the header and rows of the csv file are very long, and the model has a small context window.
NOTE: in the end, this is the case for all of our tests :)
"""
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
# default system prompt?
if system_prompt == "default":
system_prompt = tabmem.config.system_prompts["header"]
# load the csv file as a single contiguous string. also load the rows to determine offsets within the string
data = utils.load_csv_string(csv_file, header=True)
csv_rows = utils.load_csv_rows(csv_file, header=True)
# load the few-shot examples
few_shot_data = []
for fs_csv_file in few_shot_csv_files:
fs_data = utils.load_csv_string(fs_csv_file, header=True)
few_shot_data.append(fs_data)
# perform the test multiple times, cutting the dataset at random positions in rows split_rows
num_completions = -1
header, completion = None, None
for i_row in split_rows:
offset = np.sum([len(row) for row in csv_rows[: i_row - 1]])
offset += np.random.randint(
len(csv_rows[i_row]) // 3, 2 * len(csv_rows[i_row]) // 3
)
prefixes = [data[:offset]]
suffixes = [data[offset : offset + completion_length]]
few_shot = [
([fs_data[:offset]], [fs_data[offset : offset + completion_length]])
for fs_data in few_shot_data
]
# chat mode: use few-shot examples
if llm.chat_mode:
_, _, response = prefix_suffix_chat_completion(
llm, prefixes, suffixes, system_prompt, few_shot=few_shot, num_queries=1
)
response = response[0]
else: # otherwise, plain completion
response = send_completion(llm, prefixes[0])
# find the first character where the response and the completion disagree
idx = -1000
for idx, (c, r) in enumerate(zip(data[offset:], response)):
if c != r:
break
if idx == len(response) - 1 and response[idx] == data[offset + idx]:
idx += 1 # no disagreement found, set idx to length of the response
# is this the best completion so far?
if idx > num_completions:
num_completions = idx
header = prefixes[0]
completion = response
# for the printing, we first color all green up to the first disagreement
completion_print = bcolors.Green + completion[:num_completions]
# then color red up to the beginning of the next row, if any
remaining_completion = completion[num_completions:]
idx = remaining_completion.find("\n")
if idx == -1:
completion_print += bcolors.Red + remaining_completion
else:
completion_print += bcolors.Red + remaining_completion[:idx] + "\n"
remaining_completion = remaining_completion[idx + 1 :]
# for all additional rows, green up to the first disagreement, all red after that
completion_rows = remaining_completion.split("\n")
# the corresponding next row in the csv file
data_idx = data[len(header) + num_completions :].find("\n")
data_rows = data[len(header) + num_completions + data_idx + 1 :].split("\n")
for completion_row, data_row in zip(completion_rows, data_rows):
if completion_row == data_row:
completion_print += bcolors.Green + completion_row + "\n"
continue
# not equal, find the first disagreement
idx = -1000
for idx, (c, r) in enumerate(zip(data_row, completion_row)):
if c != r:
break
if idx == len(completion_row) - 1 and completion_row[idx] == data_row[idx]:
idx += 1
# print first part green, second part red
completion_print += (
bcolors.Green
+ completion_row[:idx]
+ bcolors.Red
+ completion_row[idx:]
+ "\n"
)
# remove final new line
completion_print = completion_print.rstrip("\n")
# print the result
print(
bcolors.BOLD
+ "Header Test: "
+ bcolors.ENDC
+ bcolors.Black
+ header
+ completion_print
+ bcolors.ENDC
+ bcolors.BOLD
+ "\nHeader Test Legend: "
+ bcolors.ENDC
+ "Prompt "
+ bcolors.Green
+ "Correct "
+ bcolors.Red
+ "Incorrect"
+ bcolors.ENDC
)
# TODO return true if it completes the given row, as well as the next row.
# TODO count the number of correctly completed rows and print this number
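# Illustrative sketch (not part of the original module): the character-by-character
# "first disagreement" scan used repeatedly in header_test, factored out here as a
# small standalone helper. The function name is hypothetical.
def _first_disagreement_index(expected: str, generated: str) -> int:
    """Return the index of the first position where the two strings differ,
    or the length of the shorter string if one is a prefix of the other."""
    for idx, (c, r) in enumerate(zip(expected, generated)):
        if c != r:
            return idx
    return min(len(expected), len(generated))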
####################################################################################
# Row Completion
####################################################################################
def row_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_rows=10,
num_queries=50,
few_shot=7,
out_file=None,
system_prompt: str = "default",
):
"""Row completion test: Complete the next row of the csv file, given the previous rows."""
llm = __llm_setup(llm)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["row-completion"]
# what fraction of the rows are duplicates?
rows = utils.load_csv_rows(csv_file)
frac_duplicates = 1 - len(set(rows)) / len(rows)
if frac_duplicates == 0:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ "All the rows in the dataset are unique."
)
else:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"{100*frac_duplicates:.2f}% of the rows in this dataset are duplicates."
)
# ask the model to perform row chat completion (execute the prompt)
if llm.chat_mode:
test_prefixes, test_suffixes, responses = row_chat_completion(
llm,
csv_file,
system_prompt,
num_prefix_rows,
num_queries,
few_shot,
out_file,
)
else:
test_prefixes, test_suffixes, responses = row_completion(
llm, csv_file, num_prefix_rows, num_queries, out_file
)
# count the number of exact matches
# NOTE here we assume that the test suffix is a single row that is unique, i.e. no duplicate rows
num_exact_matches = 0
for test_suffix, response in zip(test_suffixes, responses):
if test_suffix.strip() in response.strip():
num_exact_matches += 1
# the statistical test using the Levenshtein distance (TODO: taken out of the current version, although it works)
# test_prefix_rows = [prefix.split("\n") for prefix in test_prefixes]
# test_result = analysis.levenshtein_distance_t_test(
# responses, test_suffixes, test_prefix_rows
# )
# print the result
print(
bcolors.BOLD
+ "Row Completion Test: "
+ bcolors.ENDC
+ f"{num_exact_matches}/{num_queries} exact matches."
# + bcolors.BOLD
# + "\nLevenshtein distance test (p-value): "
# + bcolors.ENDC
# + f"{test_result.pvalue:.3f}."
)
return test_prefixes, test_suffixes, responses
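# Illustrative sketch (not part of the original module): how row-completion
# prefix/suffix pairs are built from consecutive csv rows (mirrors
# row_chat_completion in tabmemcheck.chat_completion). The rows are invented.
_example_rows = [
    "5.1,3.5,1.4,0.2,setosa",
    "4.9,3.0,1.4,0.2,setosa",
    "4.7,3.2,1.3,0.2,setosa",
]
_example_num_prefix_rows = 2
_example_row_prefixes = [
    "\n".join(_example_rows[i : i + _example_num_prefix_rows])
    for i in range(len(_example_rows) - _example_num_prefix_rows)
]
_example_row_suffixes = [
    _example_rows[i + _example_num_prefix_rows]
    for i in range(len(_example_rows) - _example_num_prefix_rows)
]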
####################################################################################
# Feature Completion
####################################################################################
def feature_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
feature_name: str = None,
num_queries=100,
few_shot=5,
out_file=None,
system_prompt: str = "default",
):
"""Feature completion test where we attempt to predict a single rare feature & count the number of exact matches.
The basic prompt format is the following:
System: <system_prompt>
User: Feature 1 = value 1, Feature 2 = value 2, ..., Feature n = value n
Response: Feature {feature_name} = value
This can be modified in the following ways:
- Include few-shot examples from other csv files.
- Don't use the feature names, but only the values.
"""
llm = __llm_setup(llm)
# TODO statistical analysis of the uniqueness of the feature (i.e., is the test appropriate?!)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["feature-completion"]
# if no feature name is provided, automatically select the most unique feature
if feature_name is None:
feature_name, frac_unique_values = analysis.find_most_unique_feature(csv_file)
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"Using feature {feature_name} with {100*frac_unique_values:.2f}% unique values."
)
# all the other features are the conditional features
feature_names = utils.get_feature_names(csv_file)
cond_feature_names = [f for f in feature_names if f != feature_name]
if not llm.chat_mode: # wrap base model to take chat queries
def build_prompt(messages):
prompt = ""
for m in messages:
if m["role"] == "user":
prompt += m["content"]
elif m["role"] == "assistant":
prompt += ", " + m["content"] + "\n\n"
prompt += ", "
return prompt
llm = ChatWrappedLLM(llm, build_prompt, ends_with="\n\n")
# execute the prompt
_, test_suffixes, responses = feature_values_chat_completion(
llm,
csv_file,
system_prompt,
num_queries,
few_shot,
cond_feature_names,
add_description=False,
out_file=out_file,
)
# parse the model responses
response_df = utils.parse_feature_stings(responses, [feature_name])
test_suffix_df = utils.parse_feature_stings(test_suffixes, [feature_name])
# count number of exact matches
num_exact_matches = np.sum(
response_df[feature_name] == test_suffix_df[feature_name]
)
# print the result
print(
bcolors.BOLD
+ f'Feature Completion Test ("{feature_name}"): '
+ bcolors.ENDC
+ bcolors.Black
+ f"{num_exact_matches}/{num_queries} exact matches."
+ bcolors.ENDC
)
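# Illustrative sketch (not part of the original module): how the build_prompt
# wrapper above flattens a chat transcript into a single completion prompt when a
# base model is used. The feature values below are invented for demonstration.
_example_messages = [
    {"role": "user", "content": "Age = 22, Sex = male, Pclass = 3"},
    {"role": "assistant", "content": "Name = Braund, Mr. Owen Harris"},
    {"role": "user", "content": "Age = 38, Sex = female, Pclass = 1"},
]
# Applying the same concatenation rules yields the prompt:
# "Age = 22, Sex = male, Pclass = 3, Name = Braund, Mr. Owen Harris\n\n"
# "Age = 38, Sex = female, Pclass = 1, "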
####################################################################################
# First Token Test
####################################################################################
def first_token_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_rows=10,
num_queries=100,
few_shot=7,
out_file=None,
system_prompt: str = "default",
):
"""First token test: Complete the first token of the next row of the csv file, given the previous rows."""
llm = __llm_setup(llm)
if (
system_prompt == "default"
): # default system prompt? (the first token test asks the model to complete the same task as row completion, only the evaluation is different)
system_prompt = tabmem.config.system_prompts["row-completion"]
# determine the number of digits that the first token should have
num_digits = analysis.build_first_token(csv_file)
# run a feature prediction test to see if the first token is actually random
df = utils.load_csv_df(csv_file)
rows = utils.load_csv_rows(csv_file, header=False)
df["FIRST_TOKEN_TEST_ROW"] = [r[:num_digits] for r in rows]
df["FIRST_TOKEN_TEST_ROW"] = df["FIRST_TOKEN_TEST_ROW"].astype(str)
tmp_csv_file = utils.tmp_csv_file(
df, utils.get_dataset_name(csv_file) + ".csv"
) # save the df to a tmp csv file | rejected = statistical_feature_prediction_test( | 5 | 2023-11-14 18:34:51+00:00 | 12k |
WindowsSov8forUs/bestdori_api | bestdori/post.py | [
{
"identifier": "Chart",
"path": "bestdori/charts.py",
"snippet": "class Chart(list[NoteType]):\n '''谱面类,统合针对谱面的一层操作\n\n 参数:\n chart (list[dict[str, Any]]): 原始谱面代码'''\n # 初始化\n def __init__(self, chart: list[dict[str, Any]]) -> None:\n '''谱面类,统合针对谱面的一层操作\n\n 参数:\n chart (list[dict[str, Any]]): 原始谱面代码'''\n super().__init__()\n for note in chart:\n # 遍历分类添加\n if note['type'] in ['Long', 'Slide']:\n self.append(Slide(**note))\n elif note['type'] == 'BPM':\n self.append(BPM(**note))\n elif note['type'] == 'Single':\n self.append(Single(**note))\n elif note['type'] == 'Directional':\n self.append(Directional(**note))\n else:\n # 删除其他音符\n continue\n return\n \n # 谱面规范化处理\n @classmethod\n def normalize(cls, chart: list[dict[str, Any]]) -> 'Chart':\n '''谱面规范化处理\n\n 参数:\n chart (list[dict[str, Any]]): 待处理谱面\n\n 返回:\n Chart: 处理后谱面\n '''\n normalized_chart: cls = cls(chart)\n # 对谱面进行排序\n normalized_chart.sort(key=lambda x: x.beat)\n # 处理可能出现的 BPM 错位\n if not isinstance(normalized_chart[0], BPM):\n offset: float = -1.0 # 记录 offset 修正\n # 第一位不是 BPM,找寻真正的 BPM 线\n for note in normalized_chart:\n if isinstance(note, BPM):\n offset = note.beat\n break\n if offset < 0: # 没有找到 BPM\n raise ValueError('谱面内未找到 BPM 线。')\n # 对谱面节拍进行修正\n for note in normalized_chart:\n note.beat_move(-offset)\n if isinstance(note, Slide):\n for connection in note.connections:\n if connection.beat < 0:\n connection.beat = 0\n else:\n break\n else:\n if note.beat < 0:\n note.beat = 0\n else:\n break\n \n # 处理可能出现的不合法滑条节点\n for note in normalized_chart:\n if not isinstance(note, Slide):\n continue\n index: int = 0\n for connection in note.connections:\n if index < (len(note.connections) - 1):\n if connection.flick:\n connection.flick = False\n if 0 < index < (len(note.connections) - 1):\n if connection.skill:\n connection.skill = False\n index += 1\n \n # 对谱面节拍进行修正\n if normalized_chart[0].beat != 0:\n offset = normalized_chart[0].beat\n for note in normalized_chart:\n note.beat_move(-offset)\n return normalized_chart\n \n # 谱面数据统计\n def count(self) -> Statistics:\n '''谱面数据统计\n\n 返回:\n Statistics: 统计到的谱面详细数据\n '''\n # 初始化统计数据\n start_beat = 0.0 # 谱面开始 beat 值\n end_beat = 0.0 # 谱面结束 beat 值\n prev_bpm = 120.0 # 上一个 BPM 线的 BPM 值\n prev_bpm_beat = 0.0 # 上一个 BPM 线的 beat 值\n total_notes = 0 # 总物量\n bpm_list: list[dict[str, float]] = [] # BPM 统计列表,统计所有出现的 BPM 及其有效时间\n \n # 遍历谱面数据\n for note in self:\n # 谱面为一个字典列表,每一个 note 都是一个字典\n if isinstance(note, BPM): # 如果当前是 BPM\n if note.bpm >= 0: # 如果当前 BPM 大于等于 0\n # 如果不是谱面一开始的 BPM 线且已有 note 被记录(即已出现过有效 bpm )\n if note.beat > 0 and total_notes > 0:\n if prev_bpm_beat <= start_beat: # 如果上一个 BPM 线先于第一个 note\n prev_bpm_beat = start_beat\n bpm_duration = (note.beat - prev_bpm_beat) * 60.0 / prev_bpm # 计算持续时间\n bpm_flag: bool = False # 检测 BPM 表中是否已存在指定 BPM\n for bpm_dict in bpm_list:\n if bpm_dict['bpm'] == prev_bpm:\n bpm_dict['duration'] += bpm_duration\n bpm_flag = True\n break\n if not bpm_flag: # 如果 BPM 未被记录\n bpm_dict = {\n 'bpm': prev_bpm,\n 'duration': bpm_duration\n }\n bpm_list.append(bpm_dict)\n prev_bpm = note.bpm\n prev_bpm_beat = note.beat\n continue\n \n if isinstance(note, (Single, Directional)): # 如果当前是单键或方向滑键\n # 记录 beat\n if end_beat < note.beat: # 如果当前 beat 更靠后\n end_beat = note.beat # 始终记录结束 beat\n if start_beat <= 0 or start_beat > note.beat: # 如果未记录起始 beat 或已记录的并不是起始 beat\n start_beat = note.beat\n \n total_notes += 1 # 累加一个物量\n continue\n \n if isinstance(note, Slide): # 如果是绿条\n # 绿条将会有一个 `connections` 列表用于记录节点\n for connection in note.connections:\n if not connection.hidden: # 忽略隐藏节点\n # 记录 beat\n if end_beat < connection.beat: # 
如果当前 beat 更靠后\n end_beat = connection.beat # 始终记录结束 beat\n if start_beat <= 0 or start_beat > connection.beat: # 如果未记录起始 beat 或已记录的并不是起始 beat\n start_beat = connection.beat\n \n total_notes += 1 # 累加一个物量\n continue\n \n # 当走出遍历后表明谱面已遍历至最后一个 note ,进行最后的处理\n if prev_bpm_beat < end_beat: # 如果最后一个 note 在最后一个 BPM 线之前\n bpm_duration = (end_beat - prev_bpm_beat) * 60.0 / prev_bpm # 计算持续时间\n bpm_flag: bool = False # 检测 BPM 表中是否已存在指定 BPM\n for bpm_dict in bpm_list:\n if bpm_dict['bpm'] == prev_bpm:\n bpm_dict['duration'] += bpm_duration\n bpm_flag = True\n break\n if not bpm_flag: # 如果 BPM 未被记录\n bpm_dict = {\n 'bpm': prev_bpm,\n 'duration': bpm_duration\n }\n bpm_list.append(bpm_dict)\n \n # 遍历 BPM 列表,计算总时长并获取 BPM 数值\n duration = 0.0 # 谱面总持续时长\n bpm_main = 0.0 # 主要 BPM\n bpm_main_dura = 0.0 # 主要 BPM 持续时长\n bpm_min = 2147483647.0 # 最低 BPM\n bpm_max = 0.0 # 最高 BPM\n for bpm_info in bpm_list: # 遍历\n if bpm_info['duration'] > bpm_main_dura: # 如果持续时间更长\n bpm_main_dura = bpm_info['duration']\n bpm_main = bpm_info['bpm']\n if bpm_min > bpm_info['bpm']: # 如果更小\n bpm_min = bpm_info['bpm']\n if bpm_max < bpm_info['bpm']: # 如果更大\n bpm_max = bpm_info['bpm']\n duration += bpm_info['duration'] # 累加持续时长\n \n return Statistics(\n duration,\n total_notes,\n [bpm_min, bpm_max] if bpm_min != bpm_max else [bpm_min],\n bpm_main\n )\n\n # 转换为字典列表对象\n def to_list(self) -> list[dict[str, Any]]:\n '''将 `Chart` 谱面转换为 `list[dict[str, Any]]` 对象'''\n chart_data: list[dict[str, Any]] = []\n for note in self:\n chart_data.append(note.__dict__)\n return chart_data\n \n # 转换为 json 字符串\n def json(self) -> str:\n '''将 `Chart` 谱面转换为 `json` 字符串'''\n return dumps(self.to_list(), ensure_ascii=False)\n\n # 通过 json 字符串转换为 Chart 谱面\n @classmethod\n def from_json(cls, data: str) -> 'Chart':\n '''通过 `json` 字符串转换为 `Chart` 谱面\n\n 参数:\n data (str): 谱面 `json` 字符串\n\n 返回:\n Chart: 谱面对象 `bestdori.chart.Chart`\n '''\n return cls(loads(data))\n \n # 获取官方谱面\n @classmethod\n def get_chart(\n cls,\n id_: int,\n diff: Literal['easy', 'normal', 'hard', 'expert', 'special']='expert',\n proxy: Optional[str]=None\n ) -> 'Chart':\n '''获取官方谱面\n\n 参数:\n id_ (int): 谱面 ID\n \n diff (Literal['easy', 'normal', 'hard', 'expert', 'special'], optional): 难度名称\n \n proxy (Optional[str], optional): 代理服务器\n\n 返回:\n Chart: 获取到的谱面对象 `bestdori.chart.Chart`\n '''\n response = Api(API['charts']['info'].format(id=id_, diff=diff), proxy).request('get')\n return cls.normalize(response.json())"
},
{
"identifier": "Content",
"path": "bestdori/utils/content.py",
"snippet": "class Content:\n '''内容类'''\n type: str\n '''内容类型'''\n # 初始化\n def __init__(self, values: dict[str, Any]) -> None:\n '''初始化'''\n for key, value in values.items():\n setattr(self, key, value)\n return\n \n # 纯文本\n @staticmethod\n def text(data: str) -> 'Text':\n '''纯文本\n\n 参数:\n data (str): 文本内容\n\n 返回:\n Text: 文本对象 `bestdori.utils.Text`\n '''\n return Text({'type': 'text', 'data': data})\n \n # 换行\n @staticmethod\n def br() -> 'Br':\n '''换行\n\n 返回:\n Br: 换行对象 `bestdori.utils.Br`\n '''\n return Br({'type': 'br'})\n \n # 表情\n @staticmethod\n def emoji(data: str) -> 'Emoji':\n '''表情\n\n 参数:\n data (str): 表情名称\n\n 返回:\n Emoji: 表情对象 `bestdori.utils.Emoji`\n '''\n return Emoji({'type': 'emoji', 'data': data})\n \n # 提及\n @staticmethod\n def mention(data: str) -> 'Mention':\n '''提及\n\n 参数:\n data (str): 提及的用户名\n\n 返回:\n Mention: 提及对象 `bestdori.utils.Mention`\n '''\n return Mention({'type': 'mention', 'data': data})\n \n # 标题\n @staticmethod\n def heading(data: str, margin: Literal['top']='top') -> 'Heading':\n '''标题\n\n 参数:\n data (str): 标题内容\n \n margin (Literal['top'], optional): 页边空白位置\n\n 返回:\n Heading: 标题对象 `bestdori.utils.Heading`\n '''\n return Heading({'type': 'heading', 'data': data, 'margin': margin})\n \n # 图片\n @staticmethod\n def image(objects: list[str], display: Literal[0, 1, 2]=0) -> 'Image':\n '''图片\n\n 参数:\n objects (list[str]): 图片对象网址列表\n \n display (Literal['0', '1', '2'], optional): 显示类型 `0`: 大图 `1`: 缩略图 `2`: 图标\n\n 返回:\n Image: 图片对象 `bestdori.utils.Image`\n '''\n return Image({'type': 'image', 'objects': objects, 'display': display})\n \n # 链接\n @staticmethod\n def link(\n target: Literal[\n 'url',\n 'character-single',\n 'card-single',\n 'costume-single',\n 'event-single',\n 'gacha-single',\n 'song-single',\n 'logincampaign-single',\n 'comic-single',\n 'mission-single'\n ],\n data: str\n ) -> 'Link':\n '''链接\n\n 参数:\n target (str): 链接对象\n \n data (str): 链接信息\n\n 返回:\n Link: 链接对象 `bestdori.utils.Link`\n '''\n if target != 'url':\n if not data.isdigit():\n raise ValueError('非 url 链接对象的 data 必须为数字。')\n return Link({'type': 'link', 'target': target, 'data': data})\n \n # 列表\n @staticmethod\n def list(\n target: Literal[\n 'character-info',\n 'card-info',\n 'card-icon',\n 'costume-info',\n 'event-info',\n 'gacha-info',\n 'song-info',\n 'logincampaign-info',\n 'comic-info',\n 'mission-info'\n ],\n display: Literal[0, 1, 2],\n objects: list[str]\n ) -> 'List':\n '''列表\n\n 参数:\n target (str): 列表对象\n \n display (Literal['0', '1', '2']): 显示类型\n \n objects (list[str]): 列表对象 ID 列表\n\n 返回:\n List: 列表对象 `bestdori.utils.List`\n '''\n return List({'type': 'list', 'target': target, 'display': display, 'objects': objects})"
},
{
"identifier": "API",
"path": "bestdori/utils/utils.py",
"snippet": "API = {\n 'user': {\n 'info': 'user',\n 'login': 'user/login',\n 'me': 'user/me'\n },\n 'post': {\n 'basic': 'post/basic',\n 'details': 'post/details',\n 'list': 'post/list',\n 'tag': 'post/tag',\n 'post': 'post',\n 'find': 'post/find',\n 'like': 'post/like'\n },\n 'charts': {\n 'info': 'charts/{id}/{diff}.json'\n },\n 'characters': {\n 'info': 'characters/{id}.json',\n 'all': 'characters/all.{index}.json'\n },\n 'cards': {\n 'info': 'cards/{id}.json',\n 'all': 'cards/all.{index}.json'\n },\n 'costumes': {\n 'info': 'costumes/{id}.json',\n 'all': 'costumes/all.{index}.json'\n },\n 'events': {\n 'info': 'events/{id}.json',\n 'all': 'events/all.{index}.json',\n 'top': 'eventtop/data'\n },\n 'gacha': {\n 'info': 'gacha/{id}.json',\n 'all': 'gacha/all.{index}.json'\n },\n 'songs': {\n 'info': 'songs/{id}.json',\n 'all': 'songs/all.{index}.json'\n },\n 'loginCampaigns': {\n 'info': 'loginCampaigns/{id}.json',\n 'all': 'loginCampaigns/all.{index}.json'\n },\n 'bands': {\n 'all': 'bands/all.{index}.json',\n 'main': 'bands/main.{index}.json'\n },\n 'upload': {\n 'file': 'upload/file/{hash}',\n 'prepare': 'upload/prepare',\n 'upload': 'upload',\n 'status': 'upload/status/{hash}'\n },\n 'misc': {\n 'llsif': 'misc/llsif.{index}.json'\n },\n 'all': {\n 'skills': 'skills/all.{index}.json',\n 'stamps': 'stamps/all.{index}.json',\n 'degrees': 'degrees/all.{index}.json',\n 'meta': 'songs/meta/all.{index}.json',\n 'archives': 'archives/all.{index}.json',\n 'miracleTicketExchanges': 'miracleTicketExchanges/all.{index}.json',\n 'comics': 'comics/all.{index}.json',\n }\n}"
},
{
"identifier": "ASSETS",
"path": "bestdori/utils/utils.py",
"snippet": "ASSETS = {\n 'characters': {\n 'character_kv_image': 'ui/character_kv_image/{id:>03d}_rip/image.png',\n 'resourceset': 'characters/resourceset/{resource_set_name}_rip/{name}_{type}.png',\n 'livesd': 'characters/livesd/{sd_resource_name}_rip/sdchara.png'\n },\n 'event': {\n 'banner': 'event/{asset_bundle_name}/images_rip/banner.png',\n 'logo': 'event/{asset_bundle_name}/images_rip/logo.png',\n 'topscreen': 'event/{asset_bundle_name}/topscreen_rip/{type}_eventtop.png',\n 'loginbouns': 'event/loginbonus/{asset_bundle_name}_rip/background.png'\n },\n 'songs': {\n 'musicjacket': 'musicjacket/musicjacket{index:>03d}_rip/assets-star-forassetbundle-startapp-musicjacket-musicjacket{index:>03d}-{jacket_image}-jacket.png',\n 'sound': 'sound/bgm{id:>03d}_rip/bgm{id:>03d}.mp3',\n 'musicscore': ''\n },\n 'thumb': {\n 'chara': 'thumb/chara/card{id:>05d}_rip/{resource_set_name}_{type}.png',\n 'degree': 'thumb/degree_rip/{degree_name}.png',\n 'costume': 'thumb/costume/group{id}_rip/{asset_bundle_name}.png',\n },\n 'stamp': {\n 'get': 'stamp/01_rip/{image_name}.png'\n },\n 'homebanner': {\n 'get': 'homebanner_rip/{banner_asset_bundle_name}.png'\n },\n 'gacha': {\n 'screen': 'gacha/screen/gacha{id}_rip/{asset_name}.png'\n },\n 'comic': {\n 'comic': 'comic/comic_{type}/{asset_bundle_name}_rip/{asset_bundle_name}.png',\n 'thumbnail': 'comic/comic_{type}_thumbnail/{asset_bundle_name}_rip/{asset_bundle_name}.png'\n },\n 'missions': {\n 'info': 'missions/{id}.json',\n 'all': 'missions/all.{index}.json'\n },\n 'band': {\n 'logo': 'band/logo/{id:>03d}_rip/{type}.png'\n },\n 'live2d': {\n 'buildData': 'live2d/chara/{asset_bundle_name}_rip/buildData.asset'\n }\n}"
},
{
"identifier": "Api",
"path": "bestdori/utils/network.py",
"snippet": "class Api:\n '''向 Bestdori 发送 API 请求类\n\n 参数:\n api (str): 请求的 API 地址\n \n proxy (Optional[str]): 代理服务器'''\n api: str\n '''请求的 API 地址'''\n proxy: Optional[str]=None\n '''代理服务器'''\n headers: dict[str, str]\n '''请求头'''\n # 初始化\n def __init__(\n self,\n api: str,\n proxy: Optional[str]=None\n ) -> None:\n '''初始化'''\n self.api = api\n self.proxy = proxy\n self.headers = {'Content-Type': 'application/json;charset=UTF-8'}\n return\n \n # 请求发送\n def request(\n self,\n method: Literal['get', 'post'],\n *,\n cookies: Optional[Cookies]=None,\n params: Optional[dict[str, Any]]=None,\n data: Optional[dict[str, Any]]=None,\n files: Optional[dict[str, tuple[str, BufferedReader]]]=None\n ) -> Response:\n '''请求发送\n\n 参数:\n method (Literal['get', 'post']): API 调用方法\n \n cookies (Optional[Cookies], optional): Cookies\n \n params (Optional[dict[str, Any]], optional): 调用参数\n \n data (Optional[dict[str, Any]], optional): 调用参数,将以 `json` 字符串形式发送\n \n files (Optional[dict[str, tuple[str, BufferedReader]]], optional): 发送文件参数\n\n 返回:\n Response: 收到的响应\n '''\n # 处理接收到的 API\n if self.api.startswith('http://') or self.api.startswith('https://'):\n self.api = self.api\n else:\n self.api = 'https://bestdori.com/api/' + self.api\n # 构建一个请求体\n request = Request(\n method,\n self.api,\n cookies=cookies,\n params=params,\n data=cast(dict, dumps(data)) if data is not None else data,\n files=files,\n headers=self.headers if not self.api.endswith('/upload') else None\n )\n # 构建代理服务器字典\n if self.proxy is not None:\n proxies = {'http://': self.proxy, 'https://': self.proxy}\n else:\n proxies = None\n \n # 发送请求并获取响应\n with Client(proxies=cast(dict, proxies)) as client:\n response = client.send(request)\n client.close()\n \n # 处理接收到的响应\n response.raise_for_status()\n # 判断接收到的响应是否为 json 格式\n if 'application/json' not in (content_type := response.headers.get('content-type', None)):\n if content_type is not None:\n return response\n else:\n raise Exception('接收到的响应没有 content-type。')\n \n if isinstance((response_data := response.json()), dict):\n if (result := response_data.get('result', None)) is not None:\n if result is False:\n if (code := response_data.get('code', None)) is not None:\n if code in REQUEST_EXCEPTION.keys(): # 若错误码已被记录\n exception_class = REQUEST_EXCEPTION[code]\n if params is not None:\n raise exception_class(self.api, **params)\n elif data is not None:\n raise exception_class(self.api, **data)\n else:\n raise exception_class(self.api)\n else:\n raise RequestException(self.api, code)\n else:\n raise RequestException(self.api)\n return response"
},
{
"identifier": "Assets",
"path": "bestdori/utils/network.py",
"snippet": "class Assets:\n '''获取 Bestdori 资源数据\n\n 参数:\n url (str): 请求的资源地址\n \n server (Literal['jp', 'en', 'tw', 'cn', 'kr']): 资源所在服务器\n \n proxy (Optional[str]): 代理服务器'''\n url: str\n '''请求的资源地址'''\n server: Literal['jp', 'en', 'tw', 'cn', 'kr', 'llsif']\n '''资源所在服务器'''\n proxy: Optional[str]=None\n '''代理服务器'''\n # 初始化\n def __init__(\n self,\n url: str,\n server: Literal['jp', 'en', 'tw', 'cn', 'kr', 'llsif'],\n proxy: Optional[str]=None\n ) -> None:\n '''获取 Bestdori 资源数据\n\n 参数:\n url (str): 请求的资源地址\n \n server (Literal['jp', 'en', 'tw', 'cn', 'kr', 'llsif']): 资源所在服务器\n \n proxy (Optional[str]): 代理服务器\n '''\n self.url = url\n self.server = server\n self.proxy = proxy\n return\n \n # 获取资源连接\n def get_url(self) -> str:\n '''获取资源连接\n\n 返回:\n str: 获取的资源连接 `str`\n '''\n # 如果服务器为 llsif 则转接方法\n if self.server == 'llsif':\n return self._get_niconi_url()\n \n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://bestdori.com/assets/{self.server}/' + self.url\n return self.url\n \n # 从 card.niconi.co.ni 获取资源连接\n def _get_niconi_url(self) -> str:\n '''从 card.niconi.co.ni 获取资源连接\n\n 返回:\n str: 获取的资源连接 `str`\n '''\n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://card.niconi.co.ni/asset/' + self.url\n return self.url\n \n # 获取资源\n def get(self) -> bytes:\n '''获取资源\n\n 返回:\n bytes: 获取的资源字节数据 `bytes`\n '''\n # 如果服务器为 llsif 则转接方法\n if self.server == 'llsif':\n return self._get_from_niconi()\n \n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://bestdori.com/assets/{self.server}/' + self.url\n # 构建一个请求体\n request = Request('get', self.url)\n # 构建代理服务器字典\n if self.proxy is not None:\n proxies = {'http://': self.proxy, 'https://': self.proxy}\n else:\n proxies = None\n \n # 发送请求并获取响应\n with Client(proxies=cast(dict, proxies)) as client:\n response = client.send(request)\n client.close()\n \n response.raise_for_status()\n # 检测响应资源是否存在\n content_type = response.headers.get('content-type', None)\n if content_type is None or content_type == 'text/html':\n raise AssetsNotExistError(self.url)\n return response.content\n \n # 从 card.niconi.co.ni 获取资源\n def _get_from_niconi(self) -> bytes:\n '''从 card.niconi.co.ni 获取资源\n\n 返回:\n bytes: 获取的资源字节数据 `bytes`\n '''\n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://card.niconi.co.ni/asset/' + self.url\n # 构建一个请求体\n request = Request('get', self.url)\n # 构建代理服务器字典\n if self.proxy is not None:\n proxies = {'http://': self.proxy, 'https://': self.proxy}\n else:\n proxies = None\n \n # 发送请求并获取响应\n with Client(proxies=cast(dict, proxies)) as client:\n response = client.send(request)\n client.close()\n \n response.raise_for_status()\n # 检测响应资源是否存在\n content_type = response.headers.get('content-type', None)\n if content_type is None or content_type == 'text/html':\n raise AssetsNotExistError(self.url)\n return response.content"
},
{
"identifier": "AssetsNotExistError",
"path": "bestdori/exceptions.py",
"snippet": "class AssetsNotExistError(AssetsException):\n '''资源不存在'''\n # 初始化\n def __init__(self, asset_name: str) -> None:\n msg = f'资源 {asset_name} 可能不存在。'\n super().__init__(msg)"
},
{
"identifier": "PostHasNoChartError",
"path": "bestdori/exceptions.py",
"snippet": "class PostHasNoChartError(BaseException):\n '''帖子不是社区谱面'''\n # 初始化\n def __init__(self, post: dict[str, Any]) -> None:\n name = post.get('categoryName', 'DEFAULT_POST')\n msg = f'该帖子类型 {name} 不是社区谱面。'\n super().__init__(msg)\n self.message = msg\n '''错误信息'''\n return"
},
{
"identifier": "PostHasNoSongError",
"path": "bestdori/exceptions.py",
"snippet": "class PostHasNoSongError(BaseException):\n '''帖子没有音乐字段'''\n # 初始化\n def __init__(self, post: dict[str, Any]) -> None:\n name = post.get('categoryName', 'DEFAULT_POST')\n msg = f'该帖子类型 {name} 不存在歌曲资源。'\n super().__init__(msg)\n self.message = msg\n '''错误信息'''\n return"
}
] | from typing_extensions import overload
from typing import TypedDict, Optional, Literal, Union, TYPE_CHECKING, Any
from .charts import Chart
from .utils.content import Content
from .utils.utils import API, ASSETS
from .utils.network import Api, Assets
from .exceptions import (
AssetsNotExistError,
PostHasNoChartError,
PostHasNoSongError
)
from .user import Me | 10,147 | response = Api(API['post']['basic'], self.proxy).request('get', params={'id': self.id,})
return response.json()
# 获取帖子信息
def get_details(self) -> dict[str, Any]:
'''获取帖子信息
返回:
dict[str, Any]: 帖子详细信息
'''
if len(self._post) <= 0:
# 如果没有帖子内容存储
response = Api(API['post']['details'], self.proxy).request('get', params={'id': self.id,})
if (post := response.json().get('post', None)) is not None:
self._post = dict(post)
else:
raise Exception('无帖子信息获取。')
return self._post
# 获取谱面对象
def get_chart(self) -> Chart:
'''获取谱面对象
返回:
Chart: 谱面对象
'''
post = self.get_details()
if (chart := post.get('chart', None)) is not None:
return Chart.normalize(chart)
else:
raise PostHasNoChartError(post)
# 获取帖子标签
def get_tags(self) -> list[Tag]:
'''获取帖子标签
返回:
list[Tag]: 标签列表
'''
if (tags := self.get_details().get('tags', None)) is not None:
return [Tag(tag) for tag in tags]
else:
return []
# 获取帖子内容
def get_content(self) -> str:
'''获取帖子内容
返回:
str: 帖子内容
'''
result: str = ''
        if (content := self.get_details().get('content', None)) is not None:
for seg in content:
if seg.get('type', None) in ['text', 'link']:
result += seg.get('data', '') + '\n'
elif seg.get('type', None) == 'emoji':
result += f':{seg.get("data", "")}:'
elif seg.get('type', None) == 'br':
result += '\n'
return result
# 获取歌曲信息对象
def get_song(self) -> SongRes:
'''获取歌曲信息对象
返回:
            SongRes: 歌曲音频与封面字节
'''
post = self.get_details()
if (song := post.get('song', None)) is None:
raise PostHasNoSongError(post)
if (type_ := song.get('type', None)) is None:
raise TypeError('该帖子没有歌曲类型。')
result: dict[str, Union[bytes, None]] = {}
if type_ == 'custom': # 自定义歌曲
# 获取歌曲音频
if (audio := song.get('audio', None)) is None:
result['audio'] = None
else:
try:
response = Api(audio, self.proxy).request('get')
response.raise_for_status()
result['audio'] = response.content
except Exception as exception:
print(f'获取自定义歌曲音频时失败:{type(exception).__name__}: {exception}')
result['audio'] = None
# 获取歌曲封面
if (cover := song.get('cover', None)) is None:
result['cover'] = None
else:
try:
response = Api(cover, self.proxy).request('get')
response.raise_for_status()
result['cover'] = response.content
except Exception as exception:
print(f'获取自定义歌曲封面时失败:{type(exception).__name__}: {exception}')
result['cover'] = None
elif type_ == 'bandori': # BanG Dream! 歌曲
# 获取歌曲 ID
if (id_ := song.get('id', None)) is None:
raise ValueError('未能获取歌曲 ID。')
# 获取歌曲信息
info = Api(API['songs']['info'].format(id=id_), self.proxy).request('get').json()
# 获取歌曲所在服务器
if (published_at := info.get('publishedAt', None)) is None:
raise Exception('无法获取歌曲发布时间。')
# 根据 publishedAt 数据判断服务器
if published_at[0] is not None: server = 'jp'
elif published_at[1] is not None: server = 'en'
elif published_at[2] is not None: server = 'tw'
elif published_at[3] is not None: server = 'cn'
elif published_at[4] is not None: server = 'kr'
else:
raise Exception('无法获取歌曲服务器。')
# 获取歌曲音频
try:
result['audio'] = Assets(
| '''`bestdori.post`
社区帖子相关操作'''
if TYPE_CHECKING:
# 标签类
class Tag(TypedDict):
'''标签类'''
type: str
'''标签类型'''
data: str
'''标签数据'''
# 歌曲资源类
class SongRes(TypedDict):
'''歌曲资源类'''
audio: Union[bytes, None]
'''音频字节'''
cover: Union[bytes, None]
'''封面字节'''
# 自定义歌曲信息类
class CustomSong(TypedDict):
'''自定义歌曲信息类'''
type: Literal['custom']
'''歌曲类型'''
audio: Optional[str]
'''歌曲音频'''
cover: Optional[str]
'''歌曲封面'''
# 服务器歌曲信息类
class ProvidedSong(TypedDict):
'''服务器歌曲信息类'''
type: Literal['bandori', 'llsif']
'''歌曲类型'''
id: int
'''歌曲 ID'''
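    # Illustrative examples (hypothetical values, not from the source): a CustomSong
    # could look like {'type': 'custom', 'audio': 'https://example.com/bgm.mp3', 'cover': None},
    # and a ProvidedSong like {'type': 'bandori', 'id': 128}.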
# 搜索社区谱面
@overload
def get_list(
proxy: Optional[str]=None,
*,
search: str='',
category_name: Literal['SELF_POST']='SELF_POST',
category_id: Literal['chart']='chart',
tags: list[Tag]=[],
order: Literal['TIME_DESC', 'TIME_ASC']='TIME_DESC',
limit: int=20,
offset: int=0
) -> dict[str, Any]:
'''搜索社区谱面
```python
# 以 'Arghena' 为关键词,搜索社区谱面
    get_list(search='Arghena', category_name='SELF_POST', category_id='chart')
```
参数:
proxy (Optional[str], optional): 代理服务器
search (str, optional): 搜索关键词,默认为空
category_name (Literal['SELF_POST'], optional): 搜索的帖子类型 `SELF_POST`
category_id (Literal['chart', 'text'], optional): 搜索的画廊种类 `chart`
tags (list[Tag], optional): 搜索的标签,默认为空
order (Literal['TIME_DESC', 'TIME_ASC'], optional): 帖子排序,默认时间倒序
limit (int, optional): 展示出的帖子数,默认 20
offset (int, optional): 忽略前面的 `offset` 个帖子,默认 0
返回:
dict[str, Any]: 搜索结果
```python
result: bool # 是否有响应
count: int # 搜索到的谱面总数
posts: list[dict[str, Any]] # 列举出的谱面
```
'''
...
# 搜索用户帖子
@overload
def get_list(
proxy: Optional[str]=None,
*,
username: str,
limit: int=20,
offset: int=0,
order: Literal['TIME_DESC', 'TIME_ASC']='TIME_DESC'
) -> dict[str, Any]:
'''搜索用户帖子
参数:
proxy (Optional[str], optional): 代理服务器
username (str): 用户名
limit (int, optional): 展示出的帖子数,默认 20
offset (int, optional): 忽略前面的 `offset` 个帖子,默认 0
order (Literal['TIME_DESC', 'TIME_ASC'], optional): 帖子排序,默认时间倒序
返回:
dict[str, Any]: 搜索结果
```python
result: bool # 是否有响应
count: int # 搜索到的帖子总数
posts: list[dict[str, Any]] # 列举出的帖子
```
'''
...
# 搜索帖子
@overload
def get_list(
proxy: Optional[str]=None,
*,
search: Optional[str]=None,
following: Optional[bool]=None,
category_name: Optional[str]=None,
category_id: Optional[str]=None,
tags: Optional[list[Tag]]=None,
username: Optional[str]=None,
order: Literal['TIME_DESC', 'TIME_ASC'],
limit: int=20,
offset: int=0
) -> dict[str, Any]:
'''搜索帖子
参数:
proxy (Optional[str], optional): 代理服务器
order (Literal['TIME_DESC', 'TIME_ASC']): 帖子排序
search (Optional[str], optional): 搜索关键词
following (Optional[bool], optional): 是否关注
category_name (Optional[str], optional): 画廊名称
category_id (Optional[str], optional): 画廊 ID
tags (Optional[List[Tag]], optional): 帖子标签
username (Optional[str], optional): 用户名
limit (int, optional): 展示出的帖子数,默认 20
offset (int, optional): 忽略前面的 `offset` 个帖子,默认 0
返回:
dict[str, Any]: 搜索结果
'''
...
# 搜索帖子
def get_list(proxy: Optional[str]=None, **kwargs: Any) -> dict[str, Any]:
# 去除 None 值字段
kwargs = {key: value for key, value in kwargs.items() if value is not None}
# 将下划线字段名转换为小驼峰字段名
kwargs = {
(
"".join(x.capitalize() if i > 0 else x for i, x in enumerate(key.split("_")))
): value for key, value in kwargs.items() if value is not None
}
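    # The comprehension above rewrites snake_case keys into the camelCase names the
    # API expects, e.g. 'category_name' -> 'categoryName', 'category_id' -> 'categoryId'.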
response = Api(API['post']['list'], proxy).request('post', data=kwargs)
return response.json()
# 搜索标签
def search_tags(
type_: str,
data: str='',
fuzzy: bool=True,
proxy: Optional[str]=None
) -> list[Tag]:
'''搜索已有标签
参数:
type (str): 标签类型
data (str, optional): 搜索标签数据关键词
fuzzy (bool, optional): 是否使用模糊搜索
proxy (Optional[str], optional): 代理服务器
返回:
list[Tag]: 标签类 `Tag` 列表
'''
response = Api(API['post']['tag'], proxy).request(
'get',
params={
'type': type_,
'data': data,
'fuzzy': fuzzy
}
)
if (tags := response.json().get('tags', None)) is not None:
return [Tag(tag) for tag in tags]
else:
raise Exception('搜索标签时出现未知错误。')
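    # Illustrative usage (hypothetical values, for clarity only): search_tags('text', data='April')
    # sends a GET request with those params and returns the Tag dicts found in the
    # 'tags' field of the response.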
# 发表谱面
@overload
def post(
me: 'Me',
proxy: Optional[str]=None,
*,
artists: str,
category_id: Literal['chart']='chart',
category_name: Literal['SELF_POST']='SELF_POST',
chart: Chart,
content: list[Content],
diff: Literal[0, 1, 2, 3, 4],
level: int,
song: Union[CustomSong, ProvidedSong],
tags: list[Tag]=[],
title: str
) -> int:
'''发表谱面
参数:
me (Me): 自身用户对象
proxy (Optional[str], optional): 代理服务器
artists (str): 歌手
category_id (Literal['chart'], optional): 谱面画廊 ID `chart`
category_name (Literal['SELF_POST'], optional): 谱面画廊名称 `SELF_POST`
chart (Chart): 谱面
content (list[Content]): 帖子内容
diff (Literal[0, 1, 2, 3, 4]): 难度
level (int): 等级
song (Union[CustomSong, ProvidedSong]): 歌曲
tags (list[Tag], optional): 谱面标签
title (str): 谱面标题
返回:
int: 谱面 ID
'''
...
# 发表文本帖子
@overload
def post(
me: 'Me',
proxy: Optional[str]=None,
*,
category_id: Literal['text']='text',
category_name: Literal['SELF_POST']='SELF_POST',
content: list[Content],
tags: list[Tag]=[],
title: str
) -> int:
'''发表文本帖子
参数:
me (Me): 自身用户对象
proxy (Optional[str], optional): 代理服务器
category_id (Literal['text'], optional): 帖子画廊 ID `text`
category_name (Literal['SELF_POST'], optional): 帖子画廊名称 `SELF_POST`
content (list[Content]): 帖子内容
tags (list[Tag], optional): 帖子标签
title (str): 帖子标题
返回:
int: 帖子 ID
'''
...
# 发表帖子
@overload
def post(
me: 'Me',
proxy: Optional[str]=None,
*,
artists: Optional[str]=None,
category_id: str,
category_name: str,
chart: Optional[Chart]=None,
content: list[Content],
diff: Optional[Literal[0, 1, 2, 3, 4]]=None,
level: Optional[int]=None,
song: Optional[Union[CustomSong, ProvidedSong]]=None,
tags: Optional[list[Tag]]=None,
title: Optional[str]=None
) -> int:
'''发表帖子
参数:
me (Me): 自身用户对象
proxy (Optional[str], optional): 代理服务器
artists (Optional[str], optional): 歌手
category_id (str): 帖子画廊 ID
category_name (str): 帖子画廊名称
chart (Optional[Chart], optional): 谱面
content (list[Content]): 帖子内容
diff (Optional[Literal[0, 1, 2, 3, 4]], optional): 难度
level (Optional[int], optional): 等级
song (Optional[Union[CustomSong, ProvidedSong]], optional): 歌曲
tags (Optional[list[Tag]], optional): 帖子标签
title (Optional[str], optional): 帖子标题
返回:
int: 帖子 ID
'''
...
# 发表帖子
def post(
me: 'Me',
proxy: Optional[str]=None,
**kwargs: Any
) -> int:
# 转换特定字段
if 'chart' in kwargs:
kwargs['chart'] = kwargs['chart'].to_list()
if 'content' in kwargs:
content = kwargs['content']
kwargs['content'] = [seg.__dict__ for seg in content]
# 去除 None 值字段
kwargs = {key: value for key, value in kwargs.items() if value is not None}
# 将下划线字段名转换为小驼峰字段名
kwargs = {
(
"".join(x.capitalize() if i > 0 else x for i, x in enumerate(key.split("_")))
): value for key, value in kwargs.items() if value is not None
}
response = Api(API['post']['post'], proxy).request(
'post',
cookies=me.cookies,
data=kwargs
)
if (id_ := response.json().get('id', None)) is None:
raise ValueError('发表帖子时出现未知错误。')
return id_
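    # Note (not in the source): the returned id can be passed to the Post class defined
    # below, e.g. Post(id_), to fetch and inspect the newly published post.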
# 查询帖子顺序
def find_post(category_name: str, category_id: str, id_: int, proxy: Optional[str]=None) -> int:
'''查询帖子顺序
参数:
category_name (str): 画廊名称
category_id (str): 画廊 ID
id (int): 查询的帖子 ID
proxy (Optional[str], optional): 代理服务器
Returns:
int: 帖子在该画廊的时间顺序
'''
params = {
'categoryName': category_name,
'categoryId': category_id,
'id': id_
}
response = Api(API['post']['find'], proxy).request('get', params=params)
if (position := response.json().get('position', None)) is None:
raise ValueError('查询帖子顺序时出现未知错误。')
return position
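    # Illustrative note: 'position' is the post's chronological index within the given
    # gallery, taken directly from the 'position' field of the API response.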
# 社区帖子类
class Post:
'''社区帖子类
参数:
id_ (str): 社区帖子 ID
proxy (Optional[str], optional): 代理服务器
'''
# 初始化
def __init__(self, id_: int, proxy: Optional[str]=None) -> None:
'''社区帖子类
参数:
id_ (int): 社区帖子 ID
proxy (Optional[str], optional): 代理服务器
'''
self.id: int = id_
'''社区帖子 ID'''
self.proxy: Optional[str] = proxy
'''代理服务器'''
self._post: dict[str, Any] = {}
'''社区帖子内容'''
return
# 获取帖子基础信息
def get_basic(self) -> dict[str, Any]:
'''获取帖子基础信息
返回:
dict[str, Any]: 基础信息
'''
response = Api(API['post']['basic'], self.proxy).request('get', params={'id': self.id,})
return response.json()
# 获取帖子信息
def get_details(self) -> dict[str, Any]:
'''获取帖子信息
返回:
dict[str, Any]: 帖子详细信息
'''
if len(self._post) <= 0:
# 如果没有帖子内容存储
response = Api(API['post']['details'], self.proxy).request('get', params={'id': self.id,})
if (post := response.json().get('post', None)) is not None:
self._post = dict(post)
else:
raise Exception('无帖子信息获取。')
return self._post
# 获取谱面对象
def get_chart(self) -> Chart:
'''获取谱面对象
返回:
Chart: 谱面对象
'''
post = self.get_details()
if (chart := post.get('chart', None)) is not None:
return Chart.normalize(chart)
else:
raise PostHasNoChartError(post)
# 获取帖子标签
def get_tags(self) -> list[Tag]:
'''获取帖子标签
返回:
list[Tag]: 标签列表
'''
if (tags := self.get_details().get('tags', None)) is not None:
return [Tag(tag) for tag in tags]
else:
return []
# 获取帖子内容
def get_content(self) -> str:
'''获取帖子内容
返回:
str: 帖子内容
'''
result: str = ''
        if (content := self.get_details().get('content', None)) is not None:
for seg in content:
if seg.get('type', None) in ['text', 'link']:
result += seg.get('data', '') + '\n'
elif seg.get('type', None) == 'emoji':
result += f':{seg.get("data", "")}:'
elif seg.get('type', None) == 'br':
result += '\n'
return result
# 获取歌曲信息对象
def get_song(self) -> SongRes:
'''获取歌曲信息对象
返回:
            SongRes: 歌曲音频与封面字节
'''
post = self.get_details()
if (song := post.get('song', None)) is None:
raise PostHasNoSongError(post)
if (type_ := song.get('type', None)) is None:
raise TypeError('该帖子没有歌曲类型。')
result: dict[str, Union[bytes, None]] = {}
if type_ == 'custom': # 自定义歌曲
# 获取歌曲音频
if (audio := song.get('audio', None)) is None:
result['audio'] = None
else:
try:
response = Api(audio, self.proxy).request('get')
response.raise_for_status()
result['audio'] = response.content
except Exception as exception:
print(f'获取自定义歌曲音频时失败:{type(exception).__name__}: {exception}')
result['audio'] = None
# 获取歌曲封面
if (cover := song.get('cover', None)) is None:
result['cover'] = None
else:
try:
response = Api(cover, self.proxy).request('get')
response.raise_for_status()
result['cover'] = response.content
except Exception as exception:
print(f'获取自定义歌曲封面时失败:{type(exception).__name__}: {exception}')
result['cover'] = None
elif type_ == 'bandori': # BanG Dream! 歌曲
# 获取歌曲 ID
if (id_ := song.get('id', None)) is None:
raise ValueError('未能获取歌曲 ID。')
# 获取歌曲信息
info = Api(API['songs']['info'].format(id=id_), self.proxy).request('get').json()
# 获取歌曲所在服务器
if (published_at := info.get('publishedAt', None)) is None:
raise Exception('无法获取歌曲发布时间。')
# 根据 publishedAt 数据判断服务器
if published_at[0] is not None: server = 'jp'
elif published_at[1] is not None: server = 'en'
elif published_at[2] is not None: server = 'tw'
elif published_at[3] is not None: server = 'cn'
elif published_at[4] is not None: server = 'kr'
else:
raise Exception('无法获取歌曲服务器。')
# 获取歌曲音频
try:
result['audio'] = Assets( | ASSETS['songs']['sound'].format(id=str(id_)), server, self.proxy | 3 | 2023-11-16 13:09:20+00:00 | 12k |
kampta/asic | commons/logger.py | [
{
"identifier": "images2grid",
"path": "commons/utils.py",
"snippet": "def images2grid(images, **grid_kwargs):\n # images should be (N, C, H, W)\n grid = make_grid(images, **grid_kwargs)\n out = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n return out"
},
{
"identifier": "map_minmax",
"path": "commons/utils.py",
"snippet": "def map_minmax(x, in_min, in_max, out_min, out_max):\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min"
},
{
"identifier": "compute_pck",
"path": "commons/utils.py",
"snippet": "def compute_pck(pred, target, vis=None, thresholds=None, img_size=256,\n alphas=None):\n if type(target) == list:\n target = torch.cat(target, dim=0).float().cpu()\n else:\n target = target.float().cpu()\n if type(pred) == list:\n pred = torch.cat(pred, dim=0).float().cpu()\n else:\n pred = pred.float().cpu()\n if vis is not None and type(vis) == list:\n vis = torch.cat(vis, dim=0).bool().cpu()\n elif vis is not None:\n vis = vis.bool().cpu()\n else:\n vis = torch.ones(target.size(0)).bool()\n target = target[vis]\n pred = pred[vis]\n\n if alphas is None:\n alphas = torch.arange(0.1, 0.009, -0.01)\n else:\n alphas = torch.tensor(alphas)\n correct = torch.zeros(len(alphas))\n\n err = (pred- target).norm(dim=-1)\n err = err.unsqueeze(0).repeat(len(alphas), 1)\n\n if thresholds is None:\n thresholds = alphas.unsqueeze(-1).repeat(1, err.size(1)) * img_size\n else:\n # Each keypoint within an image pair get same threshold\n # First get threshold (bbox) for all the visible keypoints\n if type(thresholds) == list:\n thresholds = torch.cat(thresholds, dim=0).float().cpu()\n thresholds = thresholds.unsqueeze(-1).repeat(1, vis.size(1))\n thresholds = thresholds[vis]\n # Next compute alpha x threshold for all the keypoints\n thresholds = thresholds.unsqueeze(0).repeat(len(alphas), 1)\n thresholds = thresholds * alphas.unsqueeze(-1)\n\n correct = err < thresholds\n correct = correct.sum(dim=-1) / len(target)\n\n print(\"PCK-Transfer: \", ','.join([f'{pck * 100:.2f}' for pck in correct]))\n return correct"
},
{
"identifier": "sample_tuples",
"path": "commons/utils.py",
"snippet": "def sample_tuples(N, k=1, count=None, seed=None):\n\n if seed is not None:\n np.random.seed(seed)\n\n if count is None: # return all possible (k+1) permutations\n # (N!/(N-k)!) x k array\n samples = np.array(list(permutations(range(N), k+1)))\n\n elif k == 1:\n p1 = np.random.choice(N, count)\n p2 = np.random.choice(N, count)\n return np.stack([p1, p2], axis=1)\n\n elif count == -1:\n samples = np.array(list(permutations(range(N), k)))\n samples = np.concatenate([samples, samples[:, 0].reshape(-1, 1)], axis=1)\n\n else: # sample count number of permutations\n # count x k array\n samples = np.zeros((count, k+1), dtype=int)\n for i in range(count):\n samples[i, :k] = np.random.choice(N, k, replace=False)\n # Force the last column to be same as the first column\n samples[:, k] = samples[:, 0]\n\n return samples"
},
{
"identifier": "pck_loop",
"path": "commons/utils.py",
"snippet": "def pck_loop(tuples, kps_all, transfer_fn, *args, ignore_interim=False, **kwargs):\n chain_length = tuples.shape[1] - 1\n gt_kps_all = []\n pred_kps_all = []\n vis_all = []\n for ch in range(chain_length):\n src_idx = tuples[:, ch]\n trg_idx = tuples[:, ch+1]\n\n if ch == 0:\n src_kps = kps_all[src_idx]\n else:\n src_kps = pred_kps\n\n pred_kps = transfer_fn(src_kps[..., :2], src_idx, trg_idx,\n *args, **kwargs)\n\n gt_kps_all.append(kps_all[trg_idx][..., :2])\n pred_kps_all.append(pred_kps)\n \n if ch == 0:\n vis = kps_all[src_idx][..., 2] * kps_all[trg_idx][..., 2] > 0\n else:\n vis = vis * kps_all[trg_idx][..., 2] > 0\n vis_all.append(vis)\n\n if ignore_interim:\n return gt_kps_all[-1], pred_kps_all[-1], vis_all[-1]\n else:\n vis_all = torch.cat(vis_all)\n gt_kps_all = torch.cat(gt_kps_all)\n pred_kps_all = torch.cat(pred_kps_all)\n return gt_kps_all, pred_kps_all, vis_all"
},
{
"identifier": "splat_points",
"path": "commons/draw.py",
"snippet": "@torch.inference_mode()\ndef splat_points(images, points, sigma, opacity, colorscale='turbo',\n colors=None, alpha_channel=None, blend_alg='alpha'):\n \"\"\"\n Highly efficient GPU-based splatting algorithm. This function is a wrapper\n for Splat2D to overlay points on images. For highest performance, use the\n colors argument directly instead of colorscale.\n images: (N, C, H, W) tensor in [-1, +1]\n points: (N, P, 2) tensor with values in [0, resolution - 1]\n (can be sub-pixel/non-integer coordinates)\n Can also be (N, K, P, 2) tensor, in which case points[:, i]\n gets a unique colorscale\n Expects points in (x, y) order.\n sigma: either float or (N,) tensor with values > 0\n controls the size of the splatted points\n opacity: float in [0, 1], controls the opacity of the splatted points\n colorscale: [Optional] str (or length-K list of str if points is size\n (N, K, P, 2)) indicating the Plotly colorscale to visualize\n points with\n colors: [Optional] (N, P, 3) tensor (or (N, K*P, 3)). If specified,\n colorscale will be ignored. Computing the colorscale\n often takes several orders of magnitude longer than the GPU-based\n splatting, so pre-computing the colors and passing them here\n instead of using the colorscale argument can provide a significant\n speed-up.\n alpha_channel: [Optional] (N, P, 1) tensor (or (N, K*P, 1)). If specified,\n colors will be blended into the output image based on the\n opacity values in alpha_channel (between 0 and 1).\n blend_alg: [Optiona] str. Specifies the blending algorithm to use when\n merging points into images. Can use alpha compositing ('alpha'),\n Laplacian Pyramid Blending ('laplacian') or a more conservative\n version of Laplacian Blending ('laplacian_light')\n :return (N, C, H, W) tensor in [-1, +1] with points splatted onto images\n \"\"\"\n assert images.dim() == 4 # (N, C, H, W)\n assert points.dim() == 3 or points.dim() == 4 # (N, P, 2) or (N, K, P, 2)\n batch_size = images.size(0)\n # each index in the second dimension gets a unique colorscale\n if points.dim() == 4:\n num_points = points.size(2)\n points = points.reshape(\n points.size(0), points.size(1) * points.size(2), 2) # (N, K*P, 2)\n if colors is None:\n if isinstance(colorscale, str):\n colorscale = [colorscale]\n assert len(colorscale) == points.size(1)\n # (1, K*P, 3)\n colors = torch.cat([\n get_plotly_colors(num_points, c) for c in colorscale], 1)\n colors = colors.repeat(batch_size, 1, 1) # (N, K*P, 3)\n elif colors is None:\n num_points = points.size(1)\n # All batch elements use the same colorscale\n if isinstance(colorscale, str):\n # (N, P, 3)\n colors = get_plotly_colors(\n points.size(1), colorscale).repeat(batch_size, 1, 1)\n else: # Each batch element uses its own colorscale\n assert len(colorscale) == batch_size\n colors = torch.cat([get_plotly_colors(num_points, c)\n for c in colorscale], 0)\n if alpha_channel is None:\n alpha_channel = torch.ones(\n batch_size, points.size(1), 1, device='cuda')\n if isinstance(sigma, (float, int)):\n sigma = torch.tensor(\n sigma, device='cuda', dtype=torch.float).view(1).repeat(batch_size)\n blank_img = torch.zeros(batch_size, images.size(1), images.size(2),\n images.size(3), device='cuda')\n blank_mask = torch.zeros(batch_size, 1, images.size(2), images.size(3),\n device='cuda')\n # (N, C, H, W)\n prop_obj_img = splat2d(blank_img, points, colors, sigma, False)\n # (N, 1, H, W)\n prop_mask_img = splat2d(blank_mask, points, alpha_channel, sigma, True)\n prop_mask_img *= opacity\n if blend_alg == 'alpha':\n # basic 
alpha-composite\n out = prop_mask_img * prop_obj_img + (1 - prop_mask_img) * images\n elif blend_alg == 'laplacian':\n blender = LaplacianBlender().to(images.device)\n out = blender(images, prop_obj_img, prop_mask_img)\n elif blend_alg == 'laplacian_light':\n blender = LaplacianBlender(levels=3, gaussian_kernel_size=11,\n gaussian_sigma=0.5).to(images.device)\n out = blender(images, prop_obj_img, prop_mask_img)\n return out"
},
{
"identifier": "load_fg_points",
"path": "commons/draw.py",
"snippet": "def load_fg_points(img_mask, resolution=None, normalize=False, device='cuda'):\n # returns points in XY format\n if resolution is None:\n resolution = img_mask.size(-1)\n us = vs = torch.arange(resolution)\n us, vs = torch.meshgrid(us, vs, indexing='xy')\n points = torch.stack([us.reshape(-1), vs.reshape(-1)]).permute(1, 0)\n points = points.unsqueeze(0).expand(img_mask.size(0), -1, -1)\n points = points.to(device)\n\n img_mask = img_mask.float()\n if len(img_mask.shape) == 3:\n img_mask = img_mask.unsqueeze(1)\n scale_factor = resolution / img_mask.size(2)\n if resolution != img_mask.size(2): # resize the mask:\n img_mask = F.interpolate(img_mask, scale_factor=scale_factor,\n mode='bilinear')\n\n img_mask = img_mask.squeeze(1)\n points_alpha = img_mask.reshape(img_mask.size(0), -1)\n points = points / (resolution-1)\n if not normalize:\n points *= (img_mask.size(2)/scale_factor-1)\n\n colors = color_wheel_fast_smooth(resolution).to(device)\n colors = colors.reshape(1, -1, 3).expand(img_mask.size(0), -1, -1)\n\n return points, points_alpha, colors"
},
{
"identifier": "concat_v",
"path": "commons/draw.py",
"snippet": "def concat_v(*argv, pad=0):\n width = 0\n height = 0\n count = len(argv)\n\n for img in argv:\n height += img.height\n width = max(width, img.width)\n\n dst = Image.new('RGB', (width, height + (count-1)*pad))\n start = 0\n for i, img in enumerate(argv):\n dst.paste(img, (0, start))\n start += img.height + pad\n return dst"
},
{
"identifier": "get_colors",
"path": "commons/draw.py",
"snippet": "def get_colors(N):\n # colors = torch.tensor(sns.color_palette(n_colors=N))\n if N > 15:\n cmap = plt.get_cmap('tab10')\n else:\n cmap = ListedColormap([\n \"red\", \"yellow\", \"blue\", \"lime\", \"magenta\", \"indigo\", \"orange\",\n \"cyan\", \"darkgreen\", \"maroon\", \"black\", \"white\", \"chocolate\",\n \"gray\", \"blueviolet\"])\n colors = np.array([cmap(x)[:3] for x in range(N)])\n\n return colors"
},
{
"identifier": "get_dense_colors",
"path": "commons/draw.py",
"snippet": "def get_dense_colors(points, resolution=256):\n colors = color_wheel_fast_smooth(resolution)\n if len(points.shape) == 2:\n return colors[points[:, 0], points[:, 1]]\n else:\n device = points.device\n N = len(points)\n colors = colors.permute(2, 0, 1).unsqueeze(0).expand(N, -1, -1, -1)\n points = map_minmax(points, 0, resolution-1, -1, 1).unsqueeze(-2)\n colors = F.grid_sample(colors.to(device), points, align_corners=False)\n return colors.squeeze(-1).permute(0, 2, 1)"
},
{
"identifier": "load_text_points",
"path": "commons/draw.py",
"snippet": "def load_text_points(text, pos=None, size=20, rot=0, img_size=256, colorscale='turbo'):\n # Measure the text area\n # font = ImageFont.truetype (r'Roboto-Bold.ttf', size)\n font = ImageFont.load_default()\n wi, hi = font.getbbox(text)[2:]\n\n # Create a dummy source image\n into = Image.new('1', (img_size, img_size), 0)\n # Copy the relevant area from the source image\n if pos is None:\n pos = (img_size // 2 - wi // 2, img_size // 2 - hi // 2)\n img = into.crop((pos[0], pos[1], pos[0] + wi, pos[1] + hi))\n\n # Print into the rotated area\n d = ImageDraw.Draw(img)\n d.text((0, 0), text, font=font, fill = (1))\n\n # Rotate it forward again\n img = img.rotate(rot, expand=1)\n\n # Insert it back into the source image\n into.paste(img, pos)\n text_points = np.where(np.array(into)>0)\n text_points = np.stack(text_points).transpose(1, 0)[:, [1, 0]]\n text_points = torch.from_numpy(np.ascontiguousarray(text_points)).float()\n text_colors = get_plotly_colors(len(text_points), colorscale).squeeze()\n return text_points, text_colors"
},
{
"identifier": "color_wheel_fast_smooth",
"path": "thirdparty/colormap/colormap_flow.py",
"snippet": "def color_wheel_fast_smooth(resolution=512, subdivision=16):\n lim = sqrt(2)\n colorwheel = expand_color_wheel(subdivision)\n N = colorwheel.shape[0]\n xs = torch.linspace(-1, 1, steps=resolution)\n ys = torch.linspace(-1, 1, steps=resolution)\n x, y = torch.meshgrid(xs, ys, indexing='xy')\n r = torch.sqrt(x*x + y*y) # (0, sqrt(2)]\n # https://math.stackexchange.com/questions/1327253/how-do-we-find-out-angle-from-x-y-coordinates\n theta = 2 * torch.arctan(-y / (-x+r)) + PI # [0, 2*PI]\n\n # Already got interpolated theta\n # Interpolate theta\n theta_ind = theta / (2*PI) * (N-1) # [0, N-1]\n theta_ind = torch.round(theta_ind).long()\n color = colorwheel[theta_ind]\n\n # Interpolate radius\n r = (r / lim).unsqueeze(-1)\n color = color * r + torch.ones(resolution, resolution, 3) * (1-r)\n # color = (color.numpy() * 255).astype(np.uint8)\n return color # HWC"
}
] | from torch.utils.tensorboard.writer import SummaryWriter
from PIL import Image
from commons.utils import images2grid, map_minmax, compute_pck, sample_tuples, \
pck_loop
from commons.draw import splat_points, load_fg_points, \
concat_v, get_colors, get_dense_colors, load_text_points
from thirdparty.colormap.colormap_flow import color_wheel_fast_smooth
import torch
import torch.nn.functional as F
import wandb
import numpy as np | 7,507 | # flatten(0, 1)
# writer.log_image_grid(
# stacked, 'kp_pred_text', train_idx, 3*vis_sample,
# log_mean_img=False, nrow=3)
# Log dense mapping from canonical space to Image space
wheel = color_wheel_fast_smooth(res).permute(2, 0, 1).unsqueeze(0).to(device)
colors = wheel.expand(vis_sample, -1, -1, -1)
flow, _ = stn(all_imgs[src_idx])
colors = F.grid_sample(colors, flow, padding_mode='border',
align_corners=True)
colors = map_minmax(colors, 0, 1, -1, 1)
alpha = 0.5
blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \
(all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]
blend_img = torch.cat([wheel, blend_img, wheel, colors* all_masks[src_idx]])
writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),
log_mean_img=False, nrow=len(blend_img)//2)
# Log keypoints from Image space to canonical space
if has_gt_kp:
canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)
canon_corrs = stn.unnormalize(canon_corrs, res, res)
canon_vis = all_kps[..., -1]
num_kp = canon_vis.size(-1)
N = canon_vis.size(0)
colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)
heatmaps = splat_points(
torch.ones(num_kp, 3, res, res, device=device) * -1,
canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,
colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))
writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,
num_kp, padding=2, pad_value=1.)
# Log parts from Image space to canonical space
# Splat one part at a time to canonical
# TODO: splat all at once
num_parts = dset.num_parts
part_kp_canons = []
part_kp_vis = []
for part in range(num_parts):
part_masks = (parts == part).float().unsqueeze(1)
kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)
kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)
kp_canon = stn.unnormalize(kp_canon, res, res)
part_kp_canons.append(kp_canon.reshape(-1, 2))
part_kp_vis.append(kp_vis.reshape(-1))
part_kp_canons = torch.stack(part_kp_canons)
part_kp_vis = torch.stack(part_kp_vis)
colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)
heatmaps = splat_points(
torch.ones(num_parts, 3, res, res, device=device) * -1,
part_kp_canons, sigma=2., opacity=1.,
colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))
writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,
num_parts, padding=2, pad_value=1.)
# Compute PCKs
N = all_imgs.size(0)
transfer_fn = stn.transfer_points
pck_pairs = None
if has_gt_kp:
# First compute PCK for all 2-pairs
if has_fixed_pairs:
tuples = dset.fixed_pairs
if dset.thresholds is not None:
thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]
else:
thresholds = None
else:
tuples = sample_tuples(N)
thresholds = None
print(f"First computing 2-point PCK for {len(tuples)} pairs")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True)
pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,
img_size=res)
# Compute k-cycle PCK
pck_cycles = []
if not has_gt_kp:
kp, kp_vis, kp_col_dense = load_fg_points(all_masks,
resolution=vis_denseres)
ignore_idx = kp_vis.sum(dim=0) == 0
all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)
ignore_interim = True
else:
ignore_interim = False
for k in [2, 3, 4]:
tuples = sample_tuples(N, k=k, count=200)
if has_fixed_pairs and dset.thresholds is not None:
thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])
thresholds = thresholds.reshape(-1)
else:
thresholds = None
print(f"Next computing {k}-cycle PCK for {len(tuples)} tuples")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True, ignore_interim=ignore_interim)
pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)
pck_cycles.append(pck)
return pck_pairs, pck_cycles
class Logger(SummaryWriter):
def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):
super().__init__(results_path)
self.results_path = results_path
self.log_to_tb = log_to_tb
self.log_to_wandb = log_to_wandb
def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),
scale_each=False, nrow=None, **kwargs):
nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow
if type(images[0]) is torch.Tensor:
|
@torch.inference_mode()
def log_visuals(canon, stn, dset, train_idx, writer, vis_sample=2,
vis_denseres=32):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
pseudo_kps = dset.pseudo_kps
parts = dset.parts
vis_sample = min(vis_sample, len(dset))
res = dset.img_size
has_gt_kp = dset.kps is not None
has_fixed_pairs = dset.fixed_pairs is not None # SPair
# Run full test dataloader (assuming small dataset)
all_imgs = dset.imgs
all_masks = dset.masks
all_kps = dset.kps
all_flows, _ = stn(all_imgs)
if has_gt_kp:
kps_cols = torch.from_numpy(get_colors(all_kps.size(1))).float()
kps_cols = map_minmax(kps_cols, 0, 1, -1, 1).to(device).unsqueeze(0)
parts_cols = torch.from_numpy(get_colors(dset.num_parts+1)).float()
parts_cols = map_minmax(parts_cols, 0, 1, -1, 1).to(device)
parts_cols[-1] = 0
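    # Note (assumption): the extra last color is reserved for background pixels and is
    # zeroed out so that non-part regions render as black in the part visualizations.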
# Text logging
text_kp, text_kp_col = load_text_points('CVPR')
text_kp = text_kp.to(device).unsqueeze(0)
text_kp_col = text_kp_col.to(device).unsqueeze(0)
pairs = sample_tuples(len(dset), count=vis_sample, seed=0)
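    # sample_tuples with the default k=1 returns a (vis_sample, 2) array of random
    # (source, target) image index pairs; seed=0 keeps the visualization pairs reproducible.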
src_idx, trg_idx = pairs[:, 0], pairs[:, 1]
# Log only once during the training
if train_idx == 0:
# Log images and the mask
writer.log_image_grid(all_imgs[:vis_sample], 'img', train_idx,
vis_sample, nrow=vis_sample)
writer.log_image_grid(all_imgs[:vis_sample]*all_masks[:vis_sample],
'img_mask', train_idx, vis_sample, nrow=vis_sample)
# Log neural best buddies (sparse)
kp1 = pseudo_kps[src_idx, trg_idx]
kp2 = pseudo_kps[trg_idx, src_idx]
kp_vis = kp1[..., -1] * kp2[..., -1]
kp1, kp2 = kp1[..., :2], kp2[..., :2]
colors = map_minmax(get_dense_colors(kp1), 0, 1, -1, 1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_pseudo_gt', train_idx, 2*vis_sample,
log_mean_img=False, nrow=2)
# Log parts
parts_img = parts_cols[parts[:vis_sample]].permute(0, 3, 1, 2)
writer.log_image_grid(parts_img, 'parts', train_idx, vis_sample,
nrow=vis_sample, log_mean_img=False)
# Log groundtruth kp
if has_gt_kp:
kp1, kp2 = all_kps[src_idx], all_kps[trg_idx]
kp_vis = kp1[..., -1] * kp2[..., -1]
kp1, kp2 = kp1[..., :2], kp2[..., :2]
colors = kps_cols.expand(vis_sample, -1, -1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_gt', train_idx, 2*vis_sample,
log_mean_img=False, nrow=2)
# Log kp and top predictions by STN (if kp are available)
if has_gt_kp:
kp1 = all_kps[src_idx][..., :2]
kp_vis = all_kps[src_idx][..., 2]
kp_pred = stn.transfer_points(
kp1, src_idx, trg_idx, all_flows, mask=all_masks, res=res, is_flow=True)
colors = kps_cols.expand(vis_sample, -1, -1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp_pred.float(), sigma=3., opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_pred_sparse', train_idx,
2*vis_sample, log_mean_img=False, nrow=2)
# Log current canon image
canon_grid = canon.get_grid(vis_sample)
if canon_grid.size(1) > 3:
canon_grid = canon_grid[:, :3]
scale_factor = res / canon_grid.size(-1)
canon_grid = F.interpolate(
canon_grid, scale_factor=scale_factor, mode='bilinear')
writer.log_image_grid(canon_grid, 'canon', train_idx, 1, log_mean_img=False)
# Log dense correspondences
kp, kp_vis, kp_col_dense = load_fg_points(all_masks[src_idx],
resolution=vis_denseres)
kp_pred, kp_canon = stn.transfer_points(
kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,
return_canon=True, is_flow=True)
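    # kp_pred holds the transferred point locations in the target images, while kp_canon
    # holds the same points mapped into canonical space (returned since return_canon=True).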
colors = map_minmax(kp_col_dense, 0, 1, -1, 1)
blend_src = splat_points(
all_imgs[src_idx], kp, sigma=4., opacity=0.75,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp_pred.float(), sigma=4., opacity=0.75,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_canon = splat_points(
torch.ones_like(canon_grid) * -1, kp_canon, sigma=1.3, opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\
flatten(0, 1)
writer.log_image_grid(
stacked, 'kp_pred_dense', train_idx, 3*vis_sample,
log_mean_img=False, nrow=3)
# # Log dense correspondences with text
# text_kp = text_kp.expand(vis_sample, -1, -1)
# text_kp_col = text_kp_col.expand(vis_sample, -1, -1)
# kp_pred, kp_canon = stn.transfer_points(
# text_kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,
# return_canon=True, is_flow=True)
# blend_src = splat_points(all_imgs[src_idx], text_kp, sigma=0.7, opacity=1.,
# colors=text_kp_col)
# blend_trg = splat_points(all_imgs[trg_idx], kp_pred.float(), sigma=0.7,
# opacity=1., colors=text_kp_col)
# blend_canon = splat_points(torch.ones_like(canon_grid) * -1, kp_canon,
# sigma=0.7, opacity=1., colors=text_kp_col)
# stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\
# flatten(0, 1)
# writer.log_image_grid(
# stacked, 'kp_pred_text', train_idx, 3*vis_sample,
# log_mean_img=False, nrow=3)
# Log dense mapping from canonical space to Image space
wheel = color_wheel_fast_smooth(res).permute(2, 0, 1).unsqueeze(0).to(device)
colors = wheel.expand(vis_sample, -1, -1, -1)
flow, _ = stn(all_imgs[src_idx])
colors = F.grid_sample(colors, flow, padding_mode='border',
align_corners=True)
colors = map_minmax(colors, 0, 1, -1, 1)
alpha = 0.5
blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \
(all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]
blend_img = torch.cat([wheel, blend_img, wheel, colors* all_masks[src_idx]])
writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),
log_mean_img=False, nrow=len(blend_img)//2)
# Log keypoints from Image space to canonical space
if has_gt_kp:
canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)
canon_corrs = stn.unnormalize(canon_corrs, res, res)
canon_vis = all_kps[..., -1]
num_kp = canon_vis.size(-1)
N = canon_vis.size(0)
colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)
heatmaps = splat_points(
torch.ones(num_kp, 3, res, res, device=device) * -1,
canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,
colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))
writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,
num_kp, padding=2, pad_value=1.)
# Log parts from Image space to canonical space
# Splat one part at a time to canonical
# TODO: splat all at once
num_parts = dset.num_parts
part_kp_canons = []
part_kp_vis = []
for part in range(num_parts):
part_masks = (parts == part).float().unsqueeze(1)
kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)
kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)
kp_canon = stn.unnormalize(kp_canon, res, res)
part_kp_canons.append(kp_canon.reshape(-1, 2))
part_kp_vis.append(kp_vis.reshape(-1))
part_kp_canons = torch.stack(part_kp_canons)
part_kp_vis = torch.stack(part_kp_vis)
colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)
heatmaps = splat_points(
torch.ones(num_parts, 3, res, res, device=device) * -1,
part_kp_canons, sigma=2., opacity=1.,
colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))
writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,
num_parts, padding=2, pad_value=1.)
# Compute PCKs
N = all_imgs.size(0)
transfer_fn = stn.transfer_points
pck_pairs = None
if has_gt_kp:
# First compute PCK for all 2-pairs
if has_fixed_pairs:
tuples = dset.fixed_pairs
if dset.thresholds is not None:
thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]
else:
thresholds = None
else:
tuples = sample_tuples(N)
thresholds = None
print(f"First computing 2-point PCK for {len(tuples)} pairs")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True)
pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,
img_size=res)
# Compute k-cycle PCK
pck_cycles = []
if not has_gt_kp:
kp, kp_vis, kp_col_dense = load_fg_points(all_masks,
resolution=vis_denseres)
ignore_idx = kp_vis.sum(dim=0) == 0
all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)
ignore_interim = True
else:
ignore_interim = False
for k in [2, 3, 4]:
tuples = sample_tuples(N, k=k, count=200)
if has_fixed_pairs and dset.thresholds is not None:
thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])
thresholds = thresholds.reshape(-1)
else:
thresholds = None
print(f"Next computing {k}-cycle PCK for {len(tuples)} tuples")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True, ignore_interim=ignore_interim)
pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)
pck_cycles.append(pck)
return pck_pairs, pck_cycles
class Logger(SummaryWriter):
def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):
super().__init__(results_path)
self.results_path = results_path
self.log_to_tb = log_to_tb
self.log_to_wandb = log_to_wandb
def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),
scale_each=False, nrow=None, **kwargs):
nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow
if type(images[0]) is torch.Tensor: | ndarr = images2grid(images, return_as_PIL=True, nrow=nrow, | 0 | 2023-11-14 16:43:16+00:00 | 12k |
AnonymGiant/ViLaM | evaluate.py | [
{
"identifier": "Config",
"path": "lavis/common/config.py",
"snippet": "class Config:\n def __init__(self, args):\n self.config = {}\n\n self.args = args\n\n # Register the config and configuration for setup\n registry.register(\"configuration\", self)\n\n user_config = self._build_opt_list(self.args.options)\n\n config = OmegaConf.load(self.args.cfg_path)\n\n runner_config = self.build_runner_config(config)\n model_config = self.build_model_config(config, **user_config)\n dataset_config = self.build_dataset_config(config)\n\n # Validate the user-provided runner configuration\n # model and dataset configuration are supposed to be validated by the respective classes\n # [TODO] validate the model/dataset configuration\n # self._validate_runner_config(runner_config)\n\n # Override the default configuration with user options.\n self.config = OmegaConf.merge(\n runner_config, model_config, dataset_config, user_config\n )\n\n def _validate_runner_config(self, runner_config):\n \"\"\"\n This method validates the configuration, such that\n 1) all the user specified options are valid;\n 2) no type mismatches between the user specified options and the config.\n \"\"\"\n runner_config_validator = create_runner_config_validator()\n runner_config_validator.validate(runner_config)\n\n def _build_opt_list(self, opts):\n opts_dot_list = self._convert_to_dot_list(opts)\n return OmegaConf.from_dotlist(opts_dot_list)\n\n @staticmethod\n def build_model_config(config, **kwargs):\n model = config.get(\"model\", None)\n assert model is not None, \"Missing model configuration file.\"\n\n model_cls = registry.get_model_class(model.arch)\n assert model_cls is not None, f\"Model '{model.arch}' has not been registered.\"\n\n model_type = kwargs.get(\"model.model_type\", None)\n if not model_type:\n model_type = model.get(\"model_type\", None)\n # else use the model type selected by user.\n\n assert model_type is not None, \"Missing model_type.\"\n\n model_config_path = model_cls.default_config_path(model_type=model_type)\n\n model_config = OmegaConf.create()\n # hiararchy override, customized config > default config\n model_config = OmegaConf.merge(\n model_config,\n OmegaConf.load(model_config_path),\n {\"model\": config[\"model\"]},\n )\n\n return model_config\n\n @staticmethod\n def build_runner_config(config):\n return {\"run\": config.run}\n\n @staticmethod\n def build_dataset_config(config):\n datasets = config.get(\"datasets\", None)\n if datasets is None:\n raise KeyError(\n \"Expecting 'datasets' as the root key for dataset configuration.\"\n )\n\n dataset_config = OmegaConf.create()\n\n for dataset_name in datasets:\n builder_cls = registry.get_builder_class(dataset_name)\n\n dataset_config_type = datasets[dataset_name].get(\"type\", \"default\")\n dataset_config_path = builder_cls.default_config_path(\n type=dataset_config_type\n )\n\n # hiararchy override, customized config > default config\n dataset_config = OmegaConf.merge(\n dataset_config,\n OmegaConf.load(dataset_config_path),\n {\"datasets\": {dataset_name: config[\"datasets\"][dataset_name]}},\n )\n\n return dataset_config\n\n def _convert_to_dot_list(self, opts):\n if opts is None:\n opts = []\n\n if len(opts) == 0:\n return opts\n\n has_equal = opts[0].find(\"=\") != -1\n\n if has_equal:\n return opts\n\n return [(opt + \"=\" + value) for opt, value in zip(opts[0::2], opts[1::2])]\n\n def get_config(self):\n return self.config\n\n @property\n def run_cfg(self):\n return self.config.run\n\n @property\n def datasets_cfg(self):\n return self.config.datasets\n\n @property\n def model_cfg(self):\n return 
self.config.model\n\n def pretty_print(self):\n logging.info(\"\\n===== Running Parameters =====\")\n logging.info(self._convert_node_to_json(self.config.run))\n\n logging.info(\"\\n====== Dataset Attributes ======\")\n datasets = self.config.datasets\n\n for dataset in datasets:\n if dataset in self.config.datasets:\n logging.info(f\"\\n======== {dataset} =======\")\n dataset_config = self.config.datasets[dataset]\n logging.info(self._convert_node_to_json(dataset_config))\n else:\n logging.warning(f\"No dataset named '{dataset}' in config. Skipping\")\n\n logging.info(f\"\\n====== Model Attributes ======\")\n logging.info(self._convert_node_to_json(self.config.model))\n\n def _convert_node_to_json(self, node):\n container = OmegaConf.to_container(node, resolve=True)\n return json.dumps(container, indent=4, sort_keys=True)\n\n def to_dict(self):\n return OmegaConf.to_container(self.config)"
},
{
"identifier": "get_rank",
"path": "lavis/common/dist_utils.py",
"snippet": "def get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()"
},
{
"identifier": "init_distributed_mode",
"path": "lavis/common/dist_utils.py",
"snippet": "def init_distributed_mode(args):\n if \"RANK\" in os.environ and \"WORLD_SIZE\" in os.environ:\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n args.gpu = int(os.environ[\"LOCAL_RANK\"])\n elif \"SLURM_PROCID\" in os.environ:\n args.rank = int(os.environ[\"SLURM_PROCID\"])\n args.gpu = args.rank % torch.cuda.device_count()\n else:\n print(\"Not using distributed mode\")\n args.distributed = False\n return\n\n args.distributed = True\n\n torch.cuda.set_device(args.gpu)\n args.dist_backend = \"nccl\"\n print(\n \"| distributed init (rank {}, world {}): {}\".format(\n args.rank, args.world_size, args.dist_url\n ),\n flush=True,\n )\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n timeout=datetime.timedelta(\n days=365\n ), # allow auto-downloading and de-compressing\n )\n torch.distributed.barrier()\n setup_for_distributed(args.rank == 0)"
},
{
"identifier": "setup_logger",
"path": "lavis/common/logger.py",
"snippet": "def setup_logger():\n logging.basicConfig(\n level=logging.INFO if dist_utils.is_main_process() else logging.WARN,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n handlers=[logging.StreamHandler()],\n )"
},
{
"identifier": "LinearWarmupCosineLRScheduler",
"path": "lavis/common/optims.py",
"snippet": "class LinearWarmupCosineLRScheduler:\n def __init__(\n self,\n optimizer,\n max_epoch,\n min_lr,\n init_lr,\n warmup_steps=0,\n warmup_start_lr=-1,\n **kwargs\n ):\n self.optimizer = optimizer\n\n self.max_epoch = max_epoch\n self.min_lr = min_lr\n\n self.init_lr = init_lr\n self.warmup_steps = warmup_steps\n self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr\n\n def step(self, cur_epoch, cur_step):\n # assuming the warmup iters less than one epoch\n if cur_epoch == 0:\n warmup_lr_schedule(\n step=cur_step,\n optimizer=self.optimizer,\n max_step=self.warmup_steps,\n init_lr=self.warmup_start_lr,\n max_lr=self.init_lr,\n )\n else:\n cosine_lr_schedule(\n epoch=cur_epoch,\n optimizer=self.optimizer,\n max_epoch=self.max_epoch,\n init_lr=self.init_lr,\n min_lr=self.min_lr,\n )"
},
{
"identifier": "LinearWarmupStepLRScheduler",
"path": "lavis/common/optims.py",
"snippet": "class LinearWarmupStepLRScheduler:\n def __init__(\n self,\n optimizer,\n max_epoch,\n min_lr,\n init_lr,\n decay_rate=1,\n warmup_start_lr=-1,\n warmup_steps=0,\n **kwargs\n ):\n self.optimizer = optimizer\n\n self.max_epoch = max_epoch\n self.min_lr = min_lr\n\n self.decay_rate = decay_rate\n\n self.init_lr = init_lr\n self.warmup_steps = warmup_steps\n self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr\n\n def step(self, cur_epoch, cur_step):\n if cur_epoch == 0:\n warmup_lr_schedule(\n step=cur_step,\n optimizer=self.optimizer,\n max_step=self.warmup_steps,\n init_lr=self.warmup_start_lr,\n max_lr=self.init_lr,\n )\n else:\n step_lr_schedule(\n epoch=cur_epoch,\n optimizer=self.optimizer,\n init_lr=self.init_lr,\n min_lr=self.min_lr,\n decay_rate=self.decay_rate,\n )"
},
{
"identifier": "now",
"path": "lavis/common/utils.py",
"snippet": "def now():\n from datetime import datetime\n\n return datetime.now().strftime(\"%Y%m%d%H%M\")[:-1]"
},
{
"identifier": "RunnerBase",
"path": "lavis/runners/runner_base.py",
"snippet": "class RunnerBase:\n \"\"\"\n A runner class to train and evaluate a model given a task and datasets.\n\n The runner uses pytorch distributed data parallel by default. Future release\n will support other distributed frameworks.\n \"\"\"\n\n def __init__(self, cfg, task, model, datasets, job_id):\n self.config = cfg\n self.job_id = job_id\n\n self.task = task\n self.datasets = datasets\n\n self._model = model\n\n self._wrapped_model = None\n self._device = None\n self._optimizer = None\n self._scaler = None\n self._dataloaders = None\n self._lr_sched = None\n\n self.start_epoch = 0\n\n # self.setup_seeds()\n self.setup_output_dir()\n\n @property\n def device(self):\n if self._device is None:\n self._device = torch.device(self.config.run_cfg.device)\n\n return self._device\n\n @property\n def use_distributed(self):\n return self.config.run_cfg.distributed\n\n @property\n def model(self):\n \"\"\"\n A property to get the DDP-wrapped model on the device.\n \"\"\"\n # move model to device\n if self._model.device != self.device:\n self._model = self._model.to(self.device)\n\n # distributed training wrapper\n if self.use_distributed:\n if self._wrapped_model is None:\n self._wrapped_model = DDP(\n self._model, device_ids=[self.config.run_cfg.gpu]\n )\n else:\n self._wrapped_model = self._model\n\n return self._wrapped_model\n\n @property\n def optimizer(self):\n # TODO make optimizer class and configurations\n if self._optimizer is None:\n lr_scale = self.config.run_cfg.get(\"lr_layer_decay\", 1)\n weight_decay = self.config.run_cfg.get(\"weight_decay\", 0.05)\n optim_params = self._model.get_optimizer_params(weight_decay,lr_scale)\n\n num_parameters = 0\n for p_group in optim_params:\n for p in p_group[\"params\"]:\n num_parameters += p.data.nelement() \n logging.info(\"number of trainable parameters: {}\".format(num_parameters)) \n \n beta2 = self.config.run_cfg.get(\"beta2\", 0.999)\n\n self._optimizer = torch.optim.AdamW(\n optim_params,\n lr=float(self.config.run_cfg.init_lr),\n betas=(0.9, beta2),\n ) \n return self._optimizer\n\n @property\n def scaler(self):\n amp = self.config.run_cfg.get(\"amp\", False)\n\n if amp:\n if self._scaler is None:\n self._scaler = torch.cuda.amp.GradScaler()\n\n return self._scaler\n\n @property\n def lr_scheduler(self):\n \"\"\"\n A property to get and create learning rate scheduler by split just in need.\n \"\"\"\n if self._lr_sched is None:\n lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)\n\n # max_epoch = self.config.run_cfg.max_epoch\n max_epoch = self.max_epoch\n # min_lr = self.config.run_cfg.min_lr\n min_lr = self.min_lr\n # init_lr = self.config.run_cfg.init_lr\n init_lr = self.init_lr\n\n # optional parameters\n decay_rate = self.config.run_cfg.get(\"lr_decay_rate\", None)\n warmup_start_lr = self.config.run_cfg.get(\"warmup_lr\", -1)\n warmup_steps = self.config.run_cfg.get(\"warmup_steps\", 0)\n\n self._lr_sched = lr_sched_cls(\n optimizer=self.optimizer,\n max_epoch=max_epoch,\n min_lr=min_lr,\n init_lr=init_lr,\n decay_rate=decay_rate,\n warmup_start_lr=warmup_start_lr,\n warmup_steps=warmup_steps,\n )\n\n return self._lr_sched\n\n @property\n def dataloaders(self) -> dict:\n \"\"\"\n A property to get and create dataloaders by split just in need.\n\n If no train_dataset_ratio is provided, concatenate map-style datasets and\n chain wds.DataPipe datasets separately. Training set becomes a tuple\n (ConcatDataset, ChainDataset), both are optional but at least one of them is\n required. 
The resultant ConcatDataset and ChainDataset will be sampled evenly.\n\n If train_dataset_ratio is provided, create a MultiIterLoader to sample\n each dataset by ratios during training.\n\n Currently do not support multiple datasets for validation and test.\n\n Returns:\n dict: {split_name: (tuples of) dataloader}\n \"\"\"\n if self._dataloaders is None:\n # reoganize datasets by split and concatenate/chain if necessary\n dataset_ratios = self.config.run_cfg.get(\"train_dataset_ratios\", None)\n\n # concatenate map-style datasets and chain wds.DataPipe datasets separately\n # training set becomes a tuple (ConcatDataset, ChainDataset), both are\n # optional but at least one of them is required. The resultant ConcatDataset\n # and ChainDataset will be sampled evenly.\n logging.info(\n \"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline).\"\n )\n\n datasets = reorg_datasets_by_split(self.datasets)\n self.datasets = concat_datasets(datasets)\n\n # print dataset statistics after concatenation/chaining\n for split_name in self.datasets:\n if isinstance(self.datasets[split_name], tuple) or isinstance(\n self.datasets[split_name], list\n ):\n # mixed wds.DataPipeline and torch.utils.data.Dataset\n num_records = sum(\n [\n len(d)\n if not type(d) in [wds.DataPipeline, ChainDataset]\n else 0\n for d in self.datasets[split_name]\n ]\n )\n\n else:\n if hasattr(self.datasets[split_name], \"__len__\"):\n # a single map-style dataset\n num_records = len(self.datasets[split_name])\n else:\n # a single wds.DataPipeline\n num_records = -1\n logging.info(\n \"Only a single wds.DataPipeline dataset, no __len__ attribute.\"\n )\n\n if num_records >= 0:\n logging.info(\n \"Loaded {} records for {} split from the dataset.\".format(\n num_records, split_name\n )\n )\n\n # create dataloaders\n split_names = sorted(self.datasets.keys())\n\n datasets = [self.datasets[split] for split in split_names]\n is_trains = [split in self.train_splits for split in split_names]\n\n batch_sizes = [\n self.config.run_cfg.batch_size_train\n if split == \"train\"\n else self.config.run_cfg.batch_size_eval\n for split in split_names\n ]\n\n collate_fns = []\n for dataset in datasets:\n if isinstance(dataset, tuple) or isinstance(dataset, list):\n collate_fns.append([getattr(d, \"collater\", None) for d in dataset])\n else:\n collate_fns.append(getattr(dataset, \"collater\", None))\n\n dataloaders = self.create_loaders(\n datasets=datasets,\n num_workers=self.config.run_cfg.num_workers,\n batch_sizes=batch_sizes,\n is_trains=is_trains,\n collate_fns=collate_fns,\n dataset_ratios=dataset_ratios,\n )\n\n self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}\n\n return self._dataloaders\n\n @property\n def cuda_enabled(self):\n return self.device.type == \"cuda\"\n\n @property\n def max_epoch(self):\n return int(self.config.run_cfg.max_epoch)\n\n @property\n def log_freq(self):\n log_freq = self.config.run_cfg.get(\"log_freq\", 50)\n return int(log_freq)\n\n @property\n def init_lr(self):\n return float(self.config.run_cfg.init_lr)\n\n @property\n def min_lr(self):\n return float(self.config.run_cfg.min_lr)\n\n @property\n def accum_grad_iters(self):\n return int(self.config.run_cfg.get(\"accum_grad_iters\", 1))\n\n @property\n def valid_splits(self):\n valid_splits = self.config.run_cfg.get(\"valid_splits\", [])\n\n if len(valid_splits) == 0:\n logging.info(\"No validation splits found.\")\n\n return valid_splits\n\n @property\n def test_splits(self):\n 
test_splits = self.config.run_cfg.get(\"test_splits\", [])\n\n return test_splits\n\n @property\n def train_splits(self):\n train_splits = self.config.run_cfg.get(\"train_splits\", [])\n\n if len(train_splits) == 0:\n logging.info(\"Empty train splits.\")\n\n return train_splits\n\n @property\n def evaluate_only(self):\n \"\"\"\n Set to True to skip training.\n \"\"\"\n return self.config.run_cfg.evaluate\n\n @property\n def use_dist_eval_sampler(self):\n return self.config.run_cfg.get(\"use_dist_eval_sampler\", True)\n\n @property\n def resume_ckpt_path(self):\n return self.config.run_cfg.get(\"resume_ckpt_path\", None)\n\n @property\n def train_loader(self):\n train_dataloader = self.dataloaders[\"train\"]\n\n return train_dataloader\n\n def setup_output_dir(self):\n lib_root = Path(registry.get_path(\"library_root\"))\n\n output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id\n result_dir = output_dir / \"result\"\n\n output_dir.mkdir(parents=True, exist_ok=True)\n result_dir.mkdir(parents=True, exist_ok=True)\n\n registry.register_path(\"result_dir\", str(result_dir))\n registry.register_path(\"output_dir\", str(output_dir))\n\n self.result_dir = result_dir\n self.output_dir = output_dir\n\n def train(self):\n start_time = time.time()\n best_agg_metric = 0\n best_epoch = 0\n\n self.log_config()\n\n # resume from checkpoint if specified\n if not self.evaluate_only and self.resume_ckpt_path is not None:\n self._load_checkpoint(self.resume_ckpt_path)\n\n for cur_epoch in range(self.start_epoch, self.max_epoch):\n # training phase\n if not self.evaluate_only:\n logging.info(\"Start training\")\n train_stats = self.train_epoch(cur_epoch)\n self.log_stats(split_name=\"train\", stats=train_stats)\n \n self._save_checkpoint(cur_epoch, is_best=False)\n\n\n # evaluation phase\n if len(self.valid_splits) > 0:\n for split_name in self.valid_splits:\n logging.info(\"Evaluating on {}.\".format(split_name))\n\n val_log = self.eval_epoch(\n split_name=split_name, cur_epoch=cur_epoch\n )\n # if val_log is not None:\n # if is_main_process():\n # assert (\n # \"agg_metrics\" in val_log\n # ), \"No agg_metrics found in validation log.\"\n\n # agg_metrics = val_log[\"agg_metrics\"]\n # if agg_metrics > best_agg_metric and split_name == \"val\":\n # best_epoch, best_agg_metric = cur_epoch, agg_metrics\n\n # self._save_checkpoint(cur_epoch, is_best=True)\n\n # val_log.update({\"best_epoch\": best_epoch})\n # self.log_stats(val_log, split_name)\n\n else:\n # if no validation split is provided, we just save the checkpoint at the end of each epoch.\n if not self.evaluate_only:\n self._save_checkpoint(cur_epoch, is_best=False)\n\n if self.evaluate_only:\n break\n\n dist.barrier()\n\n # testing phase\n test_epoch = \"best\" if len(self.valid_splits) > 0 else cur_epoch\n self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n logging.info(\"Training time {}\".format(total_time_str))\n\n def evaluate(self, cur_epoch=\"best\", skip_reload=False):\n test_logs = dict()\n\n if len(self.test_splits) > 0:\n for split_name in self.test_splits:\n test_logs[split_name] = self.eval_epoch(\n split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload\n )\n\n return test_logs\n\n def train_epoch(self, epoch):\n # train\n self.model.train()\n\n return self.task.train_epoch(\n epoch=epoch,\n model=self.model,\n data_loader=self.train_loader,\n optimizer=self.optimizer,\n 
scaler=self.scaler,\n lr_scheduler=self.lr_scheduler,\n cuda_enabled=self.cuda_enabled,\n log_freq=self.log_freq,\n accum_grad_iters=self.accum_grad_iters,\n )\n\n @torch.no_grad()\n def eval_epoch(self, split_name, cur_epoch, skip_reload=False):\n \"\"\"\n Evaluate the model on a given split.\n\n Args:\n split_name (str): name of the split to evaluate on.\n cur_epoch (int): current epoch.\n skip_reload_best (bool): whether to skip reloading the best checkpoint.\n During training, we will reload the best checkpoint for validation.\n During testing, we will use provided weights and skip reloading the best checkpoint .\n \"\"\"\n data_loader = self.dataloaders.get(split_name, None)\n assert data_loader, \"data_loader for split {} is None.\".format(split_name)\n\n # TODO In validation, you need to compute loss as well as metrics\n # TODO consider moving to model.before_evaluation()\n model = self.unwrap_dist_model(self.model)\n if not skip_reload and cur_epoch == \"best\":\n model = self._reload_best_model(model)\n model.eval()\n\n self.task.before_evaluation(\n model=model,\n dataset=self.datasets[split_name],\n )\n results = self.task.evaluation(model, data_loader)\n\n if results is not None:\n return self.task.after_evaluation(\n val_result=results,\n split_name=split_name,\n epoch=cur_epoch,\n )\n\n def unwrap_dist_model(self, model):\n if self.use_distributed:\n return model.module\n else:\n return model\n\n def create_loaders(\n self,\n datasets,\n num_workers,\n batch_sizes,\n is_trains,\n collate_fns,\n dataset_ratios=None,\n ):\n \"\"\"\n Create dataloaders for training and validation.\n \"\"\"\n\n def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):\n # create a single dataloader for each split\n if isinstance(dataset, ChainDataset) or isinstance(\n dataset, wds.DataPipeline\n ):\n # wds.WebdDataset instance are chained together\n # webdataset.DataPipeline has its own sampler and collate_fn\n loader = iter(\n DataLoader(\n dataset,\n batch_size=bsz,\n num_workers=num_workers,\n pin_memory=True,\n )\n )\n else:\n # map-style dataset are concatenated together\n # setup distributed sampler\n if self.use_distributed:\n sampler = DistributedSampler(\n dataset,\n shuffle=is_train,\n num_replicas=get_world_size(),\n rank=get_rank(),\n )\n if not self.use_dist_eval_sampler:\n # e.g. 
retrieval evaluation\n sampler = sampler if is_train else None\n else:\n sampler = None\n\n loader = DataLoader(\n dataset,\n batch_size=bsz,\n num_workers=num_workers,\n pin_memory=True,\n sampler=sampler,\n shuffle=sampler is None and is_train,\n collate_fn=collate_fn,\n drop_last=True if is_train else False,\n )\n loader = PrefetchLoader(loader)\n\n if is_train:\n loader = IterLoader(loader, use_distributed=self.use_distributed)\n\n return loader\n\n loaders = []\n\n for dataset, bsz, is_train, collate_fn in zip(\n datasets, batch_sizes, is_trains, collate_fns\n ):\n if isinstance(dataset, list) or isinstance(dataset, tuple):\n loader = MultiIterLoader(\n loaders=[\n _create_loader(d, num_workers, bsz, is_train, collate_fn[i])\n for i, d in enumerate(dataset)\n ],\n ratios=dataset_ratios,\n )\n else:\n loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)\n\n loaders.append(loader)\n\n return loaders\n\n @main_process\n def _save_checkpoint(self, cur_epoch, is_best=False):\n \"\"\"\n Save the checkpoint at the current epoch.\n \"\"\"\n model_no_ddp = self.unwrap_dist_model(self.model)\n param_grad_dic = {\n k: v.requires_grad for (k, v) in model_no_ddp.named_parameters()\n }\n state_dict = model_no_ddp.state_dict()\n for k in list(state_dict.keys()):\n if k in param_grad_dic.keys() and not param_grad_dic[k]:\n # delete parameters that do not require gradient\n del state_dict[k]\n save_obj = {\n \"model\": state_dict,\n \"optimizer\": self.optimizer.state_dict(),\n \"config\": self.config.to_dict(),\n \"scaler\": self.scaler.state_dict() if self.scaler else None,\n \"epoch\": cur_epoch,\n }\n save_to = os.path.join(\n self.output_dir,\n \"checkpoint_{}.pth\".format(\"best\" if is_best else cur_epoch),\n )\n logging.info(\"Saving checkpoint at epoch {} to {}.\".format(cur_epoch, save_to))\n torch.save(save_obj, save_to)\n\n def _reload_best_model(self, model):\n \"\"\"\n Load the best checkpoint for evaluation.\n \"\"\"\n checkpoint_path = os.path.join(self.output_dir, \"checkpoint_best.pth\")\n\n logging.info(\"Loading checkpoint from {}.\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(checkpoint[\"model\"])\n except RuntimeError as e:\n logging.warning(\n \"\"\"\n Key mismatch when loading checkpoint. 
This is expected if only part of the model is saved.\n Trying to load the model with strict=False.\n \"\"\"\n )\n model.load_state_dict(checkpoint[\"model\"], strict=False)\n return model\n\n def _load_checkpoint(self, url_or_filename):\n \"\"\"\n Resume from a checkpoint.\n \"\"\"\n if is_url(url_or_filename):\n cached_file = download_cached_file(\n url_or_filename, check_hash=False, progress=True\n )\n checkpoint = torch.load(cached_file, map_location=self.device)\n elif os.path.isfile(url_or_filename):\n checkpoint = torch.load(url_or_filename, map_location=self.device)\n else:\n raise RuntimeError(\"checkpoint url or path is invalid\")\n\n state_dict = checkpoint[\"model\"]\n self.unwrap_dist_model(self.model).load_state_dict(state_dict)\n\n self.optimizer.load_state_dict(checkpoint[\"optimizer\"])\n if self.scaler and \"scaler\" in checkpoint:\n self.scaler.load_state_dict(checkpoint[\"scaler\"])\n\n self.start_epoch = checkpoint[\"epoch\"] + 1\n logging.info(\"Resume checkpoint from {}\".format(url_or_filename))\n\n @main_process\n def log_stats(self, stats, split_name):\n if isinstance(stats, dict):\n log_stats = {**{f\"{split_name}_{k}\": v for k, v in stats.items()}}\n with open(os.path.join(self.output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n elif isinstance(stats, list):\n pass\n\n @main_process\n def log_config(self):\n with open(os.path.join(self.output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(self.config.to_dict(), indent=4) + \"\\n\")"
}
] | import argparse
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import lavis.tasks as tasks
from lavis.common.config import Config
from lavis.common.dist_utils import get_rank, init_distributed_mode
from lavis.common.logger import setup_logger
from lavis.common.optims import (
LinearWarmupCosineLRScheduler,
LinearWarmupStepLRScheduler,
)
from lavis.common.utils import now
from lavis.datasets.builders import *
from lavis.models import *
from lavis.processors import *
from lavis.runners.runner_base import RunnerBase
from lavis.tasks import * | 7,644 | """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
# imports modules for registration
def parse_args():
parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--cfg-path",
default='',
help="path to configuration file.")
parser.add_argument(
"--options",
nargs="+",
help="override some settings in the used config, the key-value pair "
"in xxx=yyy format will be merged into config file (deprecate), "
"change to --cfg-options instead.",
)
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def setup_seeds(config):
seed = config.run_cfg.seed + get_rank()
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark = False
cudnn.deterministic = True
def main():
    # allow auto-dl to complete on the main process without timeout when using the NCCL backend.
# os.environ["NCCL_BLOCKING_WAIT"] = "1"
# set before init_distributed_mode() to ensure the same job_id shared across all ranks.
job_id = now()
cfg = Config(parse_args())
init_distributed_mode(cfg.run_cfg)
setup_seeds(cfg)
# set after init_distributed_mode() to only log on master.
| """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
# imports modules for registration
def parse_args():
parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--cfg-path",
default='',
help="path to configuration file.")
parser.add_argument(
"--options",
nargs="+",
help="override some settings in the used config, the key-value pair "
"in xxx=yyy format will be merged into config file (deprecate), "
"change to --cfg-options instead.",
)
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def setup_seeds(config):
seed = config.run_cfg.seed + get_rank()
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark = False
cudnn.deterministic = True
def main():
    # allow auto-dl to complete on the main process without timeout when using the NCCL backend.
# os.environ["NCCL_BLOCKING_WAIT"] = "1"
# set before init_distributed_mode() to ensure the same job_id shared across all ranks.
job_id = now()
cfg = Config(parse_args())
init_distributed_mode(cfg.run_cfg)
setup_seeds(cfg)
# set after init_distributed_mode() to only log on master. | setup_logger() | 3 | 2023-11-14 08:57:59+00:00 | 12k |
ml4bio/USPNet | Net/New_ComModel.py | [
{
"identifier": "MultiHeadAttention",
"path": "Net/SelfAttentionTorch.py",
"snippet": "class MultiHeadAttention(nn.Module):\n\n def __init__(self,\n config\n ):\n \"\"\"Multi-head attention.\n :param in_features: Size of each input sample.\n :param head_num: Number of heads.\n :param bias: Whether to use the bias term.\n :param activation: The activation after each linear transformation.\n \"\"\"\n super(MultiHeadAttention, self).__init__()\n\n in_features = config['d_model']\n head_num = config['h']\n bias = config['bias']\n activation = config['activation']\n\n if in_features % head_num != 0:\n raise ValueError('`in_features`({}) should be divisible by `head_num`({})'.format(in_features, head_num))\n self.in_features = in_features\n self.head_num = head_num\n self.activation = activation\n self.bias = bias\n self.linear_q = nn.Linear(in_features, in_features, bias)\n self.linear_k = nn.Linear(in_features, in_features, bias)\n self.linear_v = nn.Linear(in_features, in_features, bias)\n self.linear_o = nn.Linear(in_features, in_features, bias)\n\n def forward(self, q, k, v, mask=None):\n q, k, v = self.linear_q(q), self.linear_k(k), self.linear_v(v)\n if self.activation is not None:\n q = self.activation(q)\n k = self.activation(k)\n v = self.activation(v)\n\n q = self._reshape_to_batches(q)\n k = self._reshape_to_batches(k)\n v = self._reshape_to_batches(v)\n if mask is not None:\n mask = mask.repeat(self.head_num, 1, 1)\n y = ScaledDotProductAttention()(q, k, v, mask)\n y = self._reshape_from_batches(y)\n\n y = self.linear_o(y)\n if self.activation is not None:\n y = self.activation(y)\n return y\n\n @staticmethod\n def gen_history_mask(x):\n \"\"\"Generate the mask that only uses history data.\n :param x: Input tensor.\n :return: The mask.\n \"\"\"\n batch_size, seq_len, _ = x.size()\n return torch.tril(torch.ones(seq_len, seq_len)).view(1, seq_len, seq_len).repeat(batch_size, 1, 1)\n\n def _reshape_to_batches(self, x):\n batch_size, seq_len, in_feature = x.size()\n sub_dim = in_feature // self.head_num\n return x.reshape(batch_size, seq_len, self.head_num, sub_dim)\\\n .permute(0, 2, 1, 3)\\\n .reshape(batch_size * self.head_num, seq_len, sub_dim)\n\n def _reshape_from_batches(self, x):\n batch_size, seq_len, in_feature = x.size()\n batch_size //= self.head_num\n out_dim = in_feature * self.head_num\n return x.reshape(batch_size, self.head_num, seq_len, in_feature)\\\n .permute(0, 2, 1, 3)\\\n .reshape(batch_size, seq_len, out_dim)\n\n def extra_repr(self):\n return 'in_features={}, head_num={}, bias={}, activation={}'.format(\n self.in_features, self.head_num, self.bias, self.activation,\n )"
},
{
"identifier": "TransformerEncoder",
"path": "Net/transformer.py",
"snippet": "class TransformerEncoder(nn.Module):\n ''' A neural network Transformer Encoder '''\n\n def __init__(self, vocab_size, max_sequence_length, qty_encoder_layer=1, qty_attention_head=8,\n dim_k=32, dim_v=32, dim_word_vector=256, dim_model=256, dim_inner_hidden=128, output_size=3,\n dropout=0.2, attn_dropout=0.1, embedding=False):\n super(TransformerEncoder, self).__init__()\n positions = max_sequence_length # counting UNK\n\n self.max_sequence_length = max_sequence_length\n self.dim_model = dim_model\n\n # Embedding containing sentence order information\n self.position_encoder = nn.Embedding(positions, dim_word_vector, padding_idx=0)\n self.position_encoder.weight.data = position_encoding_init(positions, dim_word_vector)\n\n # Embedding vector of words. TODO: test with word2vec\n self.word_embedding_layer = nn.Embedding(vocab_size, dim_word_vector, padding_idx=0)\n\n # Create a set of encoder layers, given the quantity informed in\n self.encoder_layers = nn.ModuleList([\n EncoderLayer(dim_model, dim_inner_hidden, qty_attention_head, dim_k, dim_v, dropout=dropout, attn_dropout=attn_dropout)\n for _ in range(qty_encoder_layer)\n ])\n\n # whether do embedding before attention module\n self.embedding = embedding\n logger.info('''Transformer Model:\n - max sequence length = {}\n - encoder layers = {}\n - attention heads = {}\n '''.format(max_sequence_length, qty_encoder_layer, qty_attention_head))\n\n def get_trainable_parameters(self):\n \"\"\" Avoid updating the position encoding \"\"\"\n position_parameters = set(map(id, self.position_encoder.parameters()))\n return (p for p in self.parameters() if id(p) not in position_parameters)\n\n def forward(self, sequence):\n if(self.embedding):\n # lookup word embedding layer\n word_embedding = self.word_embedding_layer(sequence)\n else:\n word_embedding = sequence\n encoder_output = word_embedding\n\n for encoder_layer in self.encoder_layers:\n encoder_output, attentions = encoder_layer(encoder_output)\n\n return encoder_output\n\n def get_positions(self, sequence):\n \"\"\"\n Get position\n :param sequence: input tensor\n :return: array with the order of each element. Example: [23, 45, 67, 54, PAD, PAD] ---> [1, 2, 3, 4, 0, 0]\n \"\"\"\n\n PADDING = 0\n positions = [[pos + 1 if word != PADDING else 0 for pos, word in enumerate(instance)] for instance in sequence]\n return torch.autograd.Variable(torch.LongTensor(positions), volatile=False).cuda()"
},
{
"identifier": "CRF",
"path": "Net/CRF.py",
"snippet": "class CRF(nn.Module):\n \"\"\"Conditional random field.\n\n This module implements a conditional random field [LMP01]_. The forward computation\n of this class computes the log likelihood of the given sequence of tags and\n emission score tensor. This class also has `~CRF.decode` method which finds\n the best tag sequence given an emission score tensor using `Viterbi algorithm`_.\n\n Args:\n num_tags: Number of tags.\n batch_first: Whether the first dimension corresponds to the size of a minibatch.\n reweight_ratio: Used to solve imbalance problem. The idea is from\n \"DeepCNF-D: Predicting Protein Order/Disorder Regions by\n Weighted Deep Convolutional Neural Fields\"\n\n Attributes:\n start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size\n ``(num_tags,)``.\n end_transitions (`~torch.nn.Parameter`): End transition score tensor of size\n ``(num_tags,)``.\n transitions (`~torch.nn.Parameter`): Transition score tensor of size\n ``(num_tags, num_tags)``.\n\n\n .. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).\n \"Conditional random fields: Probabilistic models for segmenting and\n labeling sequence data\". *Proc. 18th International Conf. on Machine\n Learning*. Morgan Kaufmann. pp. 282–289.\n\n .. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm\n \"\"\"\n\n def __init__(self, num_tags: int, batch_first: bool = False, reweight_ratio = None) -> None:\n if num_tags <= 0:\n raise ValueError(f'invalid number of tags: {num_tags}')\n super().__init__()\n self.num_tags = num_tags\n self.batch_first = batch_first\n self.start_transitions = nn.Parameter(torch.empty(num_tags))\n self.end_transitions = nn.Parameter(torch.empty(num_tags))\n self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))\n\n self.reset_parameters()\n\n\n self.reweight_ratio = reweight_ratio\n\n def reset_parameters(self) -> None:\n \"\"\"Initialize the transition parameters.\n\n The parameters will be initialized randomly from a uniform distribution\n between -0.1 and 0.1.\n \"\"\"\n nn.init.uniform_(self.start_transitions, -0.1, 0.1)\n nn.init.uniform_(self.end_transitions, -0.1, 0.1)\n nn.init.uniform_(self.transitions, -0.1, 0.1)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(num_tags={self.num_tags})'\n\n def forward(\n self,\n emissions: torch.Tensor,\n tags: torch.LongTensor,\n mask: Optional[torch.ByteTensor] = None,\n reduction: str = 'sum',\n ) -> torch.Tensor:\n \"\"\"Compute the conditional log likelihood of a sequence of tags given emission scores.\n\n Args:\n emissions (`~torch.Tensor`): Emission score tensor of size\n ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,\n ``(batch_size, seq_length, num_tags)`` otherwise.\n tags (`~torch.LongTensor`): Sequence of tags tensor of size\n ``(seq_length, batch_size)`` if ``batch_first`` is ``False``,\n ``(batch_size, seq_length)`` otherwise.\n mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``\n if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.\n reduction: Specifies the reduction to apply to the output:\n ``none|sum|mean|token_mean``. ``none``: no reduction will be applied.\n ``sum``: the output will be summed over batches. ``mean``: the output will be\n averaged over batches. ``token_mean``: the output will be averaged over tokens.\n\n Returns:\n `~torch.Tensor`: The log likelihood. 
This will have size ``(batch_size,)`` if\n reduction is ``none``, ``()`` otherwise.\n \"\"\"\n self._validate(emissions, tags=tags, mask=mask)\n if reduction not in ('none', 'sum', 'mean', 'token_mean'):\n raise ValueError(f'invalid reduction: {reduction}')\n if mask is None:\n mask = torch.ones_like(tags, dtype=torch.uint8)\n\n if self.batch_first:\n emissions = emissions.transpose(0, 1)\n tags = tags.transpose(0, 1)\n mask = mask.transpose(0, 1)\n if(self.reweight_ratio!=None):\n # shape: (batch_size,)\n numerator = self._compute_score_reweight(emissions, tags, mask)\n # shape: (batch_size,)\n denominator = self._compute_normalizer_reweight(emissions, mask)\n else:\n # shape: (batch_size,)\n numerator = self._compute_score(emissions, tags, mask)\n # shape: (batch_size,)\n denominator = self._compute_normalizer(emissions, mask)\n\n # shape: (batch_size,)\n llh = numerator - denominator\n\n if reduction == 'none':\n return llh\n if reduction == 'sum':\n return llh.sum()\n if reduction == 'mean':\n return llh.mean()\n assert reduction == 'token_mean'\n return llh.sum() / mask.type_as(emissions).sum()\n\n def decode(self, emissions: torch.Tensor,\n mask: Optional[torch.ByteTensor] = None) -> List[List[int]]:\n \"\"\"Find the most likely tag sequence using Viterbi algorithm.\n\n Args:\n emissions (`~torch.Tensor`): Emission score tensor of size\n ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,\n ``(batch_size, seq_length, num_tags)`` otherwise.\n mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``\n if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.\n\n Returns:\n List of list containing the best tag sequence for each batch.\n \"\"\"\n self._validate(emissions, mask=mask)\n if mask is None:\n mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)\n\n if self.batch_first:\n emissions = emissions.transpose(0, 1)\n mask = mask.transpose(0, 1)\n\n return self._viterbi_decode(emissions, mask)\n\n def decode_based_on_prob(self, emissions: torch.Tensor,\n mask: Optional[torch.ByteTensor] = None, padding=False, reduce=False, torch_form=False) -> List[List[int]]:\n \"\"\"Find the most likely tag sequence using prob matrix.\n\n Args:\n emissions (`~torch.Tensor`): Emission score tensor of size\n ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,\n ``(batch_size, seq_length, num_tags)`` otherwise.\n mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``\n if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.\n\n Returns:\n List of list containing the best tag sequence for each batch.\n \"\"\"\n self._validate(emissions, mask=mask)\n if mask is None:\n mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)\n\n if self.batch_first:\n emissions = emissions.transpose(0, 1)\n mask = mask.transpose(0, 1)\n\n # shape: (batch_size, seq_length, num_tags)\n prob_matrix = self._compute_prob(emissions, mask, padding=padding)\n\n if(reduce):\n if(torch_form):\n result = torch.argmax(prob_matrix, dim=2)\n else:\n result =torch.argmax(prob_matrix, dim=2).detach().cpu().numpy()\n else:\n if(torch_form):\n result = prob_matrix\n else:\n result = prob_matrix.detach().cpu().numpy()\n return result\n\n def _validate(\n self,\n emissions: torch.Tensor,\n tags: Optional[torch.LongTensor] = None,\n mask: Optional[torch.ByteTensor] = None) -> None:\n if emissions.dim() != 3:\n raise ValueError(f'emissions must have dimension of 3, got {emissions.dim()}')\n if 
emissions.size(2) != self.num_tags:\n raise ValueError(\n f'expected last dimension of emissions is {self.num_tags}, '\n f'got {emissions.size(2)}')\n\n if tags is not None:\n if emissions.shape[:2] != tags.shape:\n raise ValueError(\n 'the first two dimensions of emissions and tags must match, '\n f'got {tuple(emissions.shape[:2])} and {tuple(tags.shape)}')\n\n if mask is not None:\n if emissions.shape[:2] != mask.shape:\n raise ValueError(\n 'the first two dimensions of emissions and mask must match, '\n f'got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}')\n no_empty_seq = not self.batch_first and mask[0].all()\n no_empty_seq_bf = self.batch_first and mask[:, 0].all()\n if not no_empty_seq and not no_empty_seq_bf:\n raise ValueError('mask of the first timestep must all be on')\n\n def _compute_score(\n self, emissions: torch.Tensor, tags: torch.LongTensor,\n mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # tags: (seq_length, batch_size)\n # mask: (seq_length, batch_size)\n assert emissions.dim() == 3 and tags.dim() == 2\n assert emissions.shape[:2] == tags.shape\n assert emissions.size(2) == self.num_tags\n assert mask.shape == tags.shape\n assert mask[0].all()\n\n seq_length, batch_size = tags.shape\n mask = mask.type_as(emissions)\n\n # Start transition score and first emission\n # shape: (batch_size,)\n score = self.start_transitions[tags[0]]\n score += emissions[0, torch.arange(batch_size), tags[0]]\n\n for i in range(1, seq_length):\n # Transition score to next tag, only added if next timestep is valid (mask == 1)\n # shape: (batch_size,)\n\n score += self.transitions[tags[i - 1], tags[i]] * mask[i]\n\n # Emission score for next tag, only added if next timestep is valid (mask == 1)\n # shape: (batch_size,)\n score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]\n\n # End transition score\n # shape: (batch_size,)\n seq_ends = mask.long().sum(dim=0) - 1\n # shape: (batch_size,)\n last_tags = tags[seq_ends, torch.arange(batch_size)]\n # shape: (batch_size,)\n score += self.end_transitions[last_tags]\n\n\n return score\n\n def _compute_normalizer(\n self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n assert emissions.dim() == 3 and mask.dim() == 2\n assert emissions.shape[:2] == mask.shape\n assert emissions.size(2) == self.num_tags\n assert mask[0].all()\n\n seq_length = emissions.size(0)\n\n # Start transition score and first emission; score has size of\n # (batch_size, num_tags) where for each batch, the j-th column stores\n # the score that the first timestep has tag j\n # shape: (batch_size, num_tags)\n score = self.start_transitions + emissions[0]\n\n for i in range(1, seq_length):\n # Broadcast score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emissions = emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the sum of scores of all\n # possible tag sequences so far that end with transitioning from tag i to tag j\n # and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + self.transitions + broadcast_emissions\n\n # Sum over all possible current tags, but we're in score space, so a sum\n # becomes a 
log-sum-exp: for each sample, entry i stores the sum of scores of\n # all possible tag sequences so far, that end in tag i\n # shape: (batch_size, num_tags)\n next_score = torch.logsumexp(next_score, dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n\n # End transition score\n # shape: (batch_size, num_tags)\n score += self.end_transitions\n\n # Sum (log-sum-exp) over all possible tags\n # shape: (batch_size,)\n return torch.logsumexp(score, dim=1)\n\n def _compute_normalizer_reverse(\n self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n assert emissions.dim() == 3 and mask.dim() == 2\n assert emissions.shape[:2] == mask.shape\n assert emissions.size(2) == self.num_tags\n assert mask[0].all()\n\n seq_length = emissions.size(0)\n\n # Start transition score and first emission; score has size of\n # (batch_size, num_tags) where for each batch, the j-th column stores\n # the score that the first timestep has tag j\n # shape: (batch_size, num_tags)\n score = self.end_transitions + emissions[seq_length-1]\n\n for i in range(seq_length-2, -1, -1):\n # Broadcast score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emissions = emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the sum of scores of all\n # possible tag sequences so far that end with transitioning from tag i to tag j\n # and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + self.transitions + broadcast_emissions\n\n # Sum over all possible current tags, but we're in score space, so a sum\n # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of\n # all possible tag sequences so far, that end in tag i\n # shape: (batch_size, num_tags)\n next_score = torch.logsumexp(next_score, dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n\n # End transition score\n # shape: (batch_size, num_tags)\n score += self.start_transitions\n\n # Sum (log-sum-exp) over all possible tags\n # shape: (batch_size,)\n return torch.logsumexp(score, dim=1)\n\n def _compute_prob(\n self, emissions: torch.Tensor, mask: torch.ByteTensor, padding=True) -> torch.Tensor:\n\n assert emissions.dim() == 3 and mask.dim() == 2\n assert emissions.shape[:2] == mask.shape\n assert emissions.size(2) == self.num_tags\n assert mask[0].all()\n\n forward_matrix = self._compute_forward(emissions, mask).permute(0, 2, 1)\n backward_matrix = self._compute_backward(emissions, mask).permute(0, 2, 1)\n batch_size, seq_length, num_tags = backward_matrix.shape\n\n normalizer = self._compute_normalizer(emissions, mask).unsqueeze(1).repeat_interleave(seq_length, dim=1)\\\n .unsqueeze(2).repeat_interleave(num_tags, dim=2)\n if (padding):\n num_tags = num_tags - 1\n # shape: (batch_size, seq_length, num_tags)\n\n prob_matrix = torch.exp(forward_matrix + backward_matrix - normalizer)\n # clamp to [0, 1]\n # shape: (batch_size, seq_length, num_tags)\n prob_matrix = prob_matrix.clamp(0, 1)\n\n # normalize\n # shape: 
(batch_size * seq_length, num_tags)\n if (padding):\n prob_matrix = flatten_lists_3D(prob_matrix)[:, 0:-1]\n else:\n prob_matrix = flatten_lists_3D(prob_matrix)\n # shape: (batch_size * seq_length, )\n sum_prob = prob_matrix.sum(dim=1)\n # shape: (batch_size * seq_length, 3)\n sum_prob = prob_matrix.sum(dim=1).unsqueeze(1).repeat_interleave(num_tags, dim=1)\n # if for specific position in a sequence, the total prob != 0, then we calculate according to prob_pos / total_prob;\n prob_matrix = prob_matrix / sum_prob\n # shape: (batch_size, seq_length, num_tags)\n prob_matrix = prob_matrix.reshape((batch_size, seq_length, num_tags))\n\n return prob_matrix\n\n def _compute_forward(\n self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n\n seq_length = emissions.size(0)\n\n score_list = []\n\n # Start transition score and first emission; score has size of\n # (batch_size, num_tags) where for each batch, the j-th column stores\n # the score that the first timestep has tag j\n # shape: (batch_size, num_tags)\n score = self.start_transitions + emissions[0]\n score_list.append(score.unsqueeze(2))\n for i in range(1, seq_length):\n # Broadcast score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emissions = emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the sum of scores of all\n # possible tag sequences so far that end with transitioning from tag i to tag j\n # and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + self.transitions + broadcast_emissions\n\n # Sum over all possible current tags, but we're in score space, so a sum\n # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of\n # all possible tag sequences so far, that end in tag i\n # shape: (batch_size, num_tags)\n next_score = torch.logsumexp(next_score, dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n if(i<seq_length-1):\n score_list.append(score.unsqueeze(2))\n # End transition score\n # shape: (batch_size, num_tags)\n score += self.end_transitions\n score_list.append(score.unsqueeze(2))\n\n forward_matrix = torch.cat(score_list, dim=2)\n\n # Sum (log-sum-exp) over all possible tags\n # shape: (batch_size,)\n return forward_matrix\n\n def _compute_backward(\n self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n\n seq_length = emissions.size(0)\n\n score_list = []\n\n # Start transition score and first emission; score has size of\n # (batch_size, num_tags) where for each batch, the j-th column stores\n # the score that the first timestep has tag j\n # shape: (batch_size, num_tags)\n score = self.end_transitions + emissions[seq_length - 1]\n\n score_list.append(score.unsqueeze(2))\n\n for i in range(seq_length - 2, -1, -1):\n # Broadcast score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emissions = 
emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the sum of scores of all\n # possible tag sequences so far that end with transitioning from tag i to tag j\n # and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + self.transitions + broadcast_emissions\n\n # Sum over all possible current tags, but we're in score space, so a sum\n # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of\n # all possible tag sequences so far, that end in tag i\n # shape: (batch_size, num_tags)\n next_score = torch.logsumexp(next_score, dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n if(i>0):\n score_list.append(score.unsqueeze(2))\n\n # End transition score\n # shape: (batch_size, num_tags)\n score += self.start_transitions\n score_list.append(score.unsqueeze(2))\n score_list.reverse()\n\n backward_matrix = torch.cat(score_list, dim=2)\n\n # Sum (log-sum-exp) over all possible tags\n # shape: (batch_size,)\n return backward_matrix\n\n def _viterbi_decode(self, emissions: torch.FloatTensor,\n mask: torch.ByteTensor) -> List[List[int]]:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n assert emissions.dim() == 3 and mask.dim() == 2\n assert emissions.shape[:2] == mask.shape\n assert emissions.size(2) == self.num_tags\n assert mask[0].all()\n\n seq_length, batch_size = mask.shape\n\n # Start transition and first emission\n # shape: (batch_size, num_tags)\n score = self.start_transitions + emissions[0]\n history = []\n\n # score is a tensor of size (batch_size, num_tags) where for every batch,\n # value at column j stores the score of the best tag sequence so far that ends\n # with tag j\n # history saves where the best tags candidate transitioned from; this is used\n # when we trace back the best tag sequence\n\n # Viterbi algorithm recursive case: we compute the score of the best tag sequence\n # for every possible next tag\n for i in range(1, seq_length):\n # Broadcast viterbi score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emission = emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the score of the best\n # tag sequence so far that ends with transitioning from tag i to tag j and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + self.transitions + broadcast_emission\n\n # Find the maximum score over all possible current tag\n # shape: (batch_size, num_tags)\n next_score, indices = next_score.max(dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # and save the index that produces the next score\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n history.append(indices)\n\n # End transition score\n # shape: (batch_size, num_tags)\n score += self.end_transitions\n\n # Now, compute the best path for each sample\n\n # shape: (batch_size,)\n seq_ends = mask.long().sum(dim=0) - 1\n best_tags_list = []\n\n for idx in range(batch_size):\n # Find the tag which maximizes the score at the last 
timestep; this is our best tag\n # for the last timestep\n _, best_last_tag = score[idx].max(dim=0)\n best_tags = [best_last_tag.item()]\n\n # We trace back where the best last tag comes from, append that to our best tag\n # sequence, and trace it back again, and so on\n for hist in reversed(history[:seq_ends[idx]]):\n best_last_tag = hist[idx][best_tags[-1]]\n best_tags.append(best_last_tag.item())\n\n # Reverse the order because we start from the last timestep\n best_tags.reverse()\n best_tags_list.append(best_tags)\n\n return best_tags_list\n\n def _compute_score_reweight(\n self, emissions_unmask: torch.Tensor, tags: torch.LongTensor,\n mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # tags: (seq_length, batch_size)\n # mask: (seq_length, batch_size)\n assert emissions_unmask.dim() == 3 and tags.dim() == 2\n assert emissions_unmask.shape[:2] == tags.shape\n assert emissions_unmask.size(2) == self.num_tags\n assert mask.shape == tags.shape\n assert mask[0].all()\n\n device = emissions_unmask.device\n\n seq_length, batch_size, num_tags = emissions_unmask.shape\n num_tags, _ = self.transitions.shape\n\n # add reweight mask to emissions\n list = []\n for tag in self.reweight_ratio.keys():\n list.append(self.reweight_ratio[tag] * torch.ones((seq_length, batch_size, 1)))\n e_mask = torch.cat(list, dim=2).to(device)\n emissions = e_mask * emissions_unmask\n\n list = []\n # add reweight mask to transitions\n for tag in self.reweight_ratio.keys():\n list.append(self.reweight_ratio[tag] * torch.ones((1, num_tags)))\n t_mask = torch.cat(list, dim=0).to(device)\n transitions_mask = t_mask * self.transitions\n\n # add reweight mask to start_transitions\n reweight_factor = torch.tensor([self.reweight_ratio[tag] for tag in self.reweight_ratio.keys()]).to(device)\n start_transitions = self.start_transitions * reweight_factor\n\n # add reweight mask to end_transitions\n end_transitions = self.end_transitions * reweight_factor\n\n seq_length, batch_size = tags.shape\n mask = mask.type_as(emissions)\n\n # Start transition score and first emission\n # shape: (batch_size,)\n score = start_transitions[tags[0]]\n score += emissions[0, torch.arange(batch_size), tags[0]]\n\n for i in range(1, seq_length):\n # Transition score to next tag, only added if next timestep is valid (mask == 1)\n # shape: (batch_size,)\n\n score += transitions_mask[tags[i - 1], tags[i]] * mask[i]\n\n # Emission score for next tag, only added if next timestep is valid (mask == 1)\n # shape: (batch_size,)\n score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]\n\n # End transition score\n # shape: (batch_size,)\n seq_ends = mask.long().sum(dim=0) - 1\n # shape: (batch_size,)\n last_tags = tags[seq_ends, torch.arange(batch_size)]\n # shape: (batch_size,)\n score += end_transitions[last_tags]\n\n return score\n\n def _compute_normalizer_reweight(\n self, emissions_unmask: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n assert emissions_unmask.dim() == 3 and mask.dim() == 2\n assert emissions_unmask.shape[:2] == mask.shape\n assert emissions_unmask.size(2) == self.num_tags\n assert mask[0].all()\n\n device = emissions_unmask.device\n\n seq_length, batch_size, num_tags = emissions_unmask.shape\n num_tags, _ = self.transitions.shape\n\n # add reweight mask to emissions\n list = []\n for tag in self.reweight_ratio.keys():\n list.append(self.reweight_ratio[tag] * torch.ones((seq_length, 
batch_size, 1)))\n e_mask = torch.cat(list, dim=2).to(device)\n\n emissions = e_mask * emissions_unmask\n\n # add reweight mask to transitions\n list = []\n for tag in self.reweight_ratio.keys():\n list.append(self.reweight_ratio[tag] * torch.ones((1, num_tags)))\n t_mask = torch.cat(list, dim=0).to(device)\n transitions_mask = t_mask * self.transitions\n\n # add reweight mask to start_transitions\n reweight_factor = torch.tensor([self.reweight_ratio[tag] for tag in self.reweight_ratio.keys()]).to(device)\n start_transitions = self.start_transitions * reweight_factor\n\n # add reweight mask to end_transitions\n end_transitions = self.end_transitions * reweight_factor\n\n seq_length = emissions.size(0)\n\n # Start transition score and first emission; score has size of\n # (batch_size, num_tags) where for each batch, the j-th column stores\n # the score that the first timestep has tag j\n # shape: (batch_size, num_tags)\n score = start_transitions + emissions[0]\n\n for i in range(1, seq_length):\n # Broadcast score for every possible next tag\n # shape: (batch_size, num_tags, 1)\n broadcast_score = score.unsqueeze(2)\n\n # Broadcast emission score for every possible current tag\n # shape: (batch_size, 1, num_tags)\n broadcast_emissions = emissions[i].unsqueeze(1)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags) where\n # for each sample, entry at row i and column j stores the sum of scores of all\n # possible tag sequences so far that end with transitioning from tag i to tag j\n # and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + transitions_mask + broadcast_emissions\n\n # Sum over all possible current tags, but we're in score space, so a sum\n # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of\n # all possible tag sequences so far, that end in tag i\n # shape: (batch_size, num_tags)\n next_score = torch.logsumexp(next_score, dim=1)\n\n # Set score to the next score if this timestep is valid (mask == 1)\n # shape: (batch_size, num_tags)\n score = torch.where(mask[i].unsqueeze(1), next_score, score)\n\n # End transition score\n # shape: (batch_size, num_tags)\n score += end_transitions\n\n # Sum (log-sum-exp) over all possible tags\n # shape: (batch_size,)\n return torch.logsumexp(score, dim=1)"
},
{
"identifier": "LSTM_attention",
"path": "Net/LSTM_Attention.py",
"snippet": "class LSTM_attention(nn.Module):\n ''' Compose with two layers '''\n def __init__(self,config):\n super(LSTM_attention, self).__init__()\n data = config[0]\n lstm_config = config[1]\n self.lstm = BLSTM(lstm_config)\n #self.slf_attn = multihead_attention(data.HP_hidden_dim,num_heads = data.num_attention_head, dropout_rate=data.HP_dropout)\n self.label_attn = multihead_attention(data['d_model'], num_heads=data['h'],dropout_rate=data['dropout'])\n self.linear = nn.Linear(data['d_model'], data['d_model'])\n self.act = nn.ReLU()\n self.droplstm = nn.Dropout(data['dropout'])\n self.gpu = data['gpu']\n if self.gpu:\n self.lstm =self.lstm.cuda()\n self.label_attn = self.label_attn.cuda()\n\n def forward(self,lstm_out,label_embs):\n\n lstm_out = self.lstm(lstm_out)\n\n lstm_out = self.droplstm(lstm_out)\n\n # lstm_out (seq_length * batch_size * hidden)\n label_attention_output = self.label_attn(lstm_out, label_embs, label_embs)\n # label_attention_output (batch_size, seq_len, embed_size)\n lstm_out = torch.cat([lstm_out, label_attention_output], -1)\n return lstm_out"
}
] | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from Net.LSTM import *
from Net.CNN import *
from Net.SelfAttentionTorch import MultiHeadAttention
from Net.transformer import TransformerEncoder
from torch.autograd import Variable
from torch.nn import Parameter
from Net.CRF import CRF
from Net.LSTM_Attention import LSTM_attention | 10,357 |
embedding_feature_dim_msa = 768
embedding_feature_dim_pro = 1024
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
class Attention_CRF(nn.Module):
def __init__(self, config, config1, cnn_configs, lstm_lan_config, lstm_config,
use_CRF=False, use_attention=True, reweight_ratio=None):
super(Attention_CRF, self).__init__()
self.num_classes = 20
self.max_len = config1['max_text_len']
self.embedding = nn.Embedding(num_embeddings=config['vocab_size'], embedding_dim=config['embedding_size'])
self.ef1 = 512
self.ef2 = 144
self.ef3 = 32
self.csef = 11
self.ef4 = 256
self.ef5 = 256
self.ef6 = 64
if (use_CRF):
self.crf = CRF(num_tags=11, reweight_ratio=reweight_ratio)#original: num_tags=9
self.use_CRF = True
else:
self.use_CRF = False
self.linear = nn.Sequential(nn.Linear(config['embedding_size'], config1['input_dim'] - 4), nn.ReLU())
self.linear2 = nn.Linear(self.max_len, self.max_len)
self.lstm = BLSTM(config1)
self.cnn = TextCNN(cnn_configs[0])
self.cnn1 = TextCNN(cnn_configs[1])
self.lstm2 = BLSTM(lstm_config)
|
embedding_feature_dim_msa = 768
embedding_feature_dim_pro = 1024
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
class Attention_CRF(nn.Module):
def __init__(self, config, config1, cnn_configs, lstm_lan_config, lstm_config,
use_CRF=False, use_attention=True, reweight_ratio=None):
super(Attention_CRF, self).__init__()
self.num_classes = 20
self.max_len = config1['max_text_len']
self.embedding = nn.Embedding(num_embeddings=config['vocab_size'], embedding_dim=config['embedding_size'])
self.ef1 = 512
self.ef2 = 144
self.ef3 = 32
self.csef = 11
self.ef4 = 256
self.ef5 = 256
self.ef6 = 64
if (use_CRF):
self.crf = CRF(num_tags=11, reweight_ratio=reweight_ratio)#original: num_tags=9
self.use_CRF = True
else:
self.use_CRF = False
self.linear = nn.Sequential(nn.Linear(config['embedding_size'], config1['input_dim'] - 4), nn.ReLU())
self.linear2 = nn.Linear(self.max_len, self.max_len)
self.lstm = BLSTM(config1)
self.cnn = TextCNN(cnn_configs[0])
self.cnn1 = TextCNN(cnn_configs[1])
self.lstm2 = BLSTM(lstm_config)
| self.lstm_lan = LSTM_attention(lstm_lan_config) | 3 | 2023-11-14 08:19:42+00:00 | 12k |
doodledood/chat-flock | examples/manual_hierarchical_participant.py | [
{
"identifier": "InMemoryChatDataBackingStore",
"path": "chatflock/backing_stores/in_memory.py",
"snippet": "class InMemoryChatDataBackingStore(ChatDataBackingStore):\n messages: List[ChatMessage]\n participants: Dict[str, ChatParticipant]\n last_message_id: Optional[int] = None\n\n def __init__(\n self, messages: Optional[List[ChatMessage]] = None, participants: Optional[List[ChatParticipant]] = None\n ):\n self.messages = messages or []\n self.participants = {participant.name: participant for participant in (participants or [])}\n self.last_message_id = None if len(self.messages) == 0 else self.messages[-1].id\n\n def get_messages(self) -> List[ChatMessage]:\n return self.messages\n\n def add_message(self, sender_name: str, content: str, timestamp: Optional[datetime.datetime] = None) -> ChatMessage:\n self.last_message_id = self.last_message_id + 1 if self.last_message_id is not None else 1\n\n message = ChatMessage(\n id=self.last_message_id,\n sender_name=sender_name,\n content=content,\n timestamp=timestamp or datetime.datetime.now(),\n )\n\n self.messages.append(message)\n\n return message\n\n def clear_messages(self):\n self.messages = []\n self.last_message_id = None\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n participants = list(self.participants.values())\n active_participants = [\n participant for participant in participants if isinstance(participant, ActiveChatParticipant)\n ]\n\n return active_participants\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n participants = list(self.participants.values())\n participants = [\n participant for participant in participants if not isinstance(participant, ActiveChatParticipant)\n ]\n\n return participants\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if not isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if participant.name in self.participants:\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.participants[participant.name] = participant\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n if participant.name not in self.participants:\n raise ChatParticipantNotJoinedToChatError(participant.name)\n\n self.participants.pop(participant.name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return isinstance(participant, ActiveChatParticipant)\n\n return False\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return not isinstance(participant, ActiveChatParticipant)\n\n return False"
},
{
"identifier": "Chat",
"path": "chatflock/base.py",
"snippet": "class Chat:\n backing_store: ChatDataBackingStore\n renderer: ChatRenderer\n name: Optional[str] = None\n max_total_messages: Optional[int] = None\n hide_messages: bool = False\n\n def __init__(\n self,\n backing_store: ChatDataBackingStore,\n renderer: ChatRenderer,\n initial_participants: Optional[Sequence[ChatParticipant]] = None,\n name: Optional[str] = None,\n max_total_messages: Optional[int] = None,\n hide_messages: bool = False,\n ):\n if max_total_messages is not None and max_total_messages <= 0:\n raise ValueError(\"Max total messages must be None or greater than 0.\")\n\n self.backing_store = backing_store\n self.renderer = renderer\n self.name = name\n self.hide_messages = hide_messages\n self.max_total_messages = max_total_messages\n\n for i, participant in enumerate(initial_participants or []):\n self.add_participant(participant)\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if self.has_active_participant_with_name(participant.name) or self.has_non_active_participant_with_name(\n participant.name\n ):\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.backing_store.add_participant(participant)\n\n all_participants = (\n self.backing_store.get_active_participants() + self.backing_store.get_non_active_participants()\n )\n for participant in all_participants:\n participant.on_participant_joined_chat(chat=self, participant=participant)\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n self.backing_store.remove_participant(participant)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_participant_left_chat(chat=self, participant=participant)\n\n def add_message(self, sender_name: str, content: str) -> None:\n sender = self.backing_store.get_active_participant_by_name(sender_name)\n if sender is None:\n raise ChatParticipantNotJoinedToChatError(sender_name)\n\n message = self.backing_store.add_message(sender_name=sender_name, content=content)\n\n self.renderer.render_new_chat_message(chat=self, message=message)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_new_chat_message(chat=self, message=message)\n\n def get_messages(self) -> List[ChatMessage]:\n return self.backing_store.get_messages()\n\n def clear_messages(self):\n self.backing_store.clear_messages()\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n return self.backing_store.get_active_participants()\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n return self.backing_store.get_non_active_participants()\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n return self.backing_store.get_active_participant_by_name(name=name)\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n return self.backing_store.get_non_active_participant_by_name(name=name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_active_participant_with_name(participant_name=participant_name)\n\n def has_non_active_participant_with_name(self, 
participant_name: str) -> bool:\n return self.backing_store.has_non_active_participant_with_name(participant_name=participant_name)\n\n @property\n def active_participants_str(self):\n return \"\\n\\n\".join([participant.detailed_str() for participant in self.get_active_participants()])"
},
{
"identifier": "LangChainBasedAIChatConductor",
"path": "chatflock/conductors/langchain.py",
"snippet": "class LangChainBasedAIChatConductor(ChatConductor):\n def __init__(\n self,\n chat_model: BaseChatModel,\n goal: str = \"No explicit goal provided.\",\n composition_generator: Optional[ChatCompositionGenerator] = None,\n interaction_schema: Optional[str] = None,\n retriever: Optional[BaseRetriever] = None,\n spinner: Optional[Halo] = None,\n tools: Optional[List[BaseTool]] = None,\n chat_model_args: Optional[Dict[str, Any]] = None,\n ):\n self.chat_model = chat_model\n self.chat_model_args = chat_model_args or {}\n self.goal = goal\n self.tools = tools\n self.retriever = retriever\n self.composition_generator = composition_generator\n self.interaction_schema = interaction_schema\n self.spinner = spinner\n\n self.composition_initialized = False\n\n def create_next_speaker_system_prompt(self, chat: \"Chat\") -> str:\n chat_messages = chat.get_messages()\n\n if self.retriever is not None and len(chat_messages) > 0:\n relevant_docs = self.get_relevant_docs(messages=chat_messages)\n else:\n relevant_docs = []\n\n system_message = StructuredString(\n sections=[\n Section(\n name=\"Mission\",\n text=\"Select the next speaker in the conversation based on the previous messages in the \"\n \"conversation and an optional INTERACTION SCHEMA. If it seems to you that the chat \"\n \"should end instead of selecting a next speaker, terminate it.\",\n ),\n Section(name=\"Rules\", list=[\"You can only select one of the participants in the group chat.\"]),\n Section(\n name=\"Process\",\n list=[\n \"Look at the last message in the conversation and determine who should speak next based on the \"\n \"INTERACTION SCHEMA, if provided.\",\n \"If you determine that the chat should end, you should return the \"\n \"string TERMINATE instead of a participant name. For example, when the goal has been achieved, \"\n \", it is impossible to reach, or if the user asks to terminate the chat.\",\n ],\n ),\n Section(\n name=\"Input\",\n list=[\n \"Chat goal\",\n \"Currently active participants in the conversation\",\n \"Speaker interaction schema\",\n \"Previous messages from the conversation\",\n ],\n ),\n Section(\n name=\"Output\",\n text=\"The name of the next speaker in the conversation. Or, TERMINATE if the chat should end, \"\n \"instead.\",\n ),\n Section(name=\"Example Outputs\", list=['\"John\"', '\"TERMINATE\"']),\n Section(\n name=\"Additional Context for Selection\",\n text=\"None\"\n if len(relevant_docs) == 0\n else \"The following documents may be relevant for your selection of the \"\n \"next speaker, only use them for context for a better response, \"\n \"if applicable\",\n sub_sections=[\n Section(name=f\"Document {i + 1}\", text=f\"```{doc.page_content}```\")\n for i, doc in enumerate(relevant_docs)\n ],\n ),\n ]\n )\n\n return str(system_message)\n\n def create_next_speaker_first_human_prompt(self, chat: \"Chat\", goal: str) -> str:\n messages = chat.get_messages()\n messages_list = [f\"- {message.sender_name}: {message.content}\" for message in messages]\n\n participants = chat.get_active_participants()\n\n prompt = StructuredString(\n sections=[\n Section(name=\"Goal\", text=goal or \"No explicit goal provided.\"),\n Section(\n name=\"Currently Active Participants\", list=[f\"{str(participant)}\" for participant in participants]\n ),\n Section(\n name=\"Interaction Schema\",\n text=self.interaction_schema or \"Not provided. 
Use your best judgement.\",\n ),\n Section(\n name=\"Chat Messages\",\n text=\"No messages yet.\" if len(messages_list) == 0 else None,\n list=messages_list if len(messages_list) > 0 else [],\n ),\n ]\n )\n\n return str(prompt)\n\n def prepare_chat(self, chat: \"Chat\", **kwargs: Any) -> None:\n # If a composition generator is provided, generate a new composition for the chat before starting.\n if self.composition_generator is not None and not self.composition_initialized:\n composition_suggestion = kwargs.get(\"composition_suggestion\", None)\n new_composition = self.composition_generator.generate_composition_for_chat(\n chat=chat,\n goal=self.goal,\n composition_suggestion=composition_suggestion,\n interaction_schema=self.interaction_schema,\n )\n\n # Sync participants with the new composition.\n current_active_participants = chat.get_active_participants()\n new_participants_names = {p.name for p in new_composition.participants}\n\n for participant in new_composition.participants:\n # Add missing participants.\n if not chat.has_active_participant_with_name(participant.name):\n chat.add_participant(participant)\n continue\n\n # Remove other participants not mentioned in the new composition.\n if participant.name not in new_participants_names:\n chat.remove_participant(participant)\n\n self.interaction_schema = new_composition.participants_interaction_schema\n\n self.composition_initialized = True\n\n super().prepare_chat(chat=chat, **kwargs)\n\n def select_next_speaker(self, chat: Chat) -> Optional[ActiveChatParticipant]:\n participants = chat.get_active_participants()\n if len(participants) == 0:\n return None\n\n if self.spinner is not None:\n if chat.name is None:\n self.spinner.start(text=\"The Chat Conductor is selecting the next speaker...\")\n else:\n self.spinner.start(text=f\"The Chat Conductor ({chat.name}) is selecting the next speaker...\")\n\n # Ask the AI to select the next speaker.\n messages = [\n SystemMessage(content=self.create_next_speaker_system_prompt(chat=chat)),\n HumanMessage(content=self.create_next_speaker_first_human_prompt(chat=chat, goal=self.goal)),\n ]\n\n result = self.execute_messages(messages=messages)\n next_speaker_name = result.strip()\n\n while not chat.has_active_participant_with_name(next_speaker_name) and next_speaker_name != \"TERMINATE\":\n messages.append(AIMessage(content=next_speaker_name))\n messages.append(\n HumanMessage(\n content=f'Speaker \"{next_speaker_name}\" is not a participant in the chat. 
Choose another one.'\n )\n )\n\n result = self.execute_messages(messages=messages)\n next_speaker_name = result.strip()\n\n if next_speaker_name == \"TERMINATE\":\n if self.spinner is not None:\n if chat.name is None:\n self.spinner.stop_and_persist(\n symbol=\"👥\", text=\"The Chat Conductor has decided to terminate the chat.\"\n )\n else:\n self.spinner.stop_and_persist(\n symbol=\"👥\", text=f\"The Chat Conductor ({chat.name}) has decided to terminate the \" f\"chat.\"\n )\n\n return None\n\n next_speaker = chat.get_active_participant_by_name(next_speaker_name)\n if next_speaker is None:\n raise ChatParticipantNotJoinedToChatError(next_speaker_name)\n\n if self.spinner is not None:\n if chat.name is None:\n self.spinner.succeed(\n text=f'The Chat Conductor has selected \"{str(next_speaker)}\" ' f\"as the next speaker.\"\n )\n else:\n self.spinner.succeed(\n text=f'The Chat Conductor ({chat.name}) has selected \"{str(next_speaker)}\" ' f\"as the next speaker.\"\n )\n\n return next_speaker\n\n def execute_messages(self, messages: Sequence[BaseMessage]) -> str:\n return execute_chat_model_messages(\n messages=messages,\n chat_model=self.chat_model,\n tools=self.tools,\n spinner=self.spinner,\n chat_model_args=self.chat_model_args,\n )\n\n def get_relevant_docs(self, messages: Sequence[ChatMessage]) -> List[Document]:\n if self.retriever is None:\n return []\n\n return self.retriever.get_relevant_documents(query=messages[-1].content)"
},
{
"identifier": "RoundRobinChatConductor",
"path": "chatflock/conductors/round_robin.py",
"snippet": "class RoundRobinChatConductor(ChatConductor):\n def select_next_speaker(self, chat: Chat) -> Optional[ActiveChatParticipant]:\n active_participants = chat.get_active_participants()\n if len(active_participants) <= 0:\n return None\n\n messages = chat.get_messages()\n last_message = messages[-1] if len(messages) > 0 else None\n\n if last_message is not None and self.is_termination_message(last_message):\n return None\n\n last_speaker = last_message.sender_name if last_message is not None else None\n if last_speaker is None:\n return next(iter(active_participants))\n\n # Rotate to the next participant in the list.\n participant_names = [participant.name for participant in active_participants]\n\n if last_speaker not in participant_names:\n next_speaker_name = participant_names[0]\n else:\n last_speaker_index = participant_names.index(last_speaker)\n next_speaker_index = (last_speaker_index + 1) % len(participant_names)\n next_speaker_name = participant_names[next_speaker_index]\n\n next_speaker = chat.get_active_participant_by_name(next_speaker_name)\n if next_speaker is None or not isinstance(next_speaker, ActiveChatParticipant):\n raise ChatParticipantNotJoinedToChatError(next_speaker_name)\n\n return next_speaker\n\n def get_chat_result(self, chat: \"Chat\") -> str:\n result = super().get_chat_result(chat=chat)\n\n try:\n idx = result.rindex(\"TERMINATE\")\n result = result[:idx].strip()\n except ValueError:\n result = result.strip()\n\n return result\n\n def is_termination_message(self, message: ChatMessage) -> bool:\n return message.content.strip().endswith(\"TERMINATE\")"
},
{
"identifier": "GroupBasedChatParticipant",
"path": "chatflock/participants/group.py",
"snippet": "class GroupBasedChatParticipant(ActiveChatParticipant):\n inner_chat_conductor: ChatConductor\n inner_chat: Chat\n mission: str\n spinner: Optional[Halo] = None\n clear_inner_chat_before_responding: bool = False\n\n def __init__(\n self,\n group_name: str,\n chat: Chat,\n mission: str,\n chat_conductor: ChatConductor,\n clear_inner_chat_before_responding: bool = False,\n spinner: Optional[Halo] = None,\n **kwargs: Any,\n ) -> None:\n self.inner_chat = chat\n self.inner_chat_conductor = chat_conductor\n self.clear_inner_chat_before_responding = clear_inner_chat_before_responding\n self.mission = mission\n self.spinner = spinner\n\n # Make sure the inner chat is aligned\n self.inner_chat.name = group_name\n\n super().__init__(name=group_name, **kwargs)\n\n # Make sure the chat & conductor are initialized, as it may be a dynamic chat with\n # no participants yet.\n self.inner_chat_conductor.prepare_chat(chat=self.inner_chat)\n\n def respond_to_chat(self, chat: \"Chat\") -> str:\n if self.clear_inner_chat_before_responding:\n self.inner_chat.clear_messages()\n\n prev_spinner_text = None\n if self.spinner is not None:\n prev_spinner_text = self.spinner.text\n self.spinner.stop_and_persist(symbol=\"👥\", text=f\"{self.name} started a discussion.\")\n self.spinner.start(text=f\"{self.name} is discussing...\")\n\n messages = chat.get_messages()\n conversation_str = \"\\n\".join([f\"- {message.sender_name}: {message.content}\" for message in messages])\n\n leader = self.inner_chat.get_active_participants()[0]\n request_for_group, _ = get_response(\n query=\"Please translate the request for yourself in the external conversation into a collaboration \"\n \"request for your internal group. This is the external conversation:\"\n f\"\\n```{conversation_str}```\\n\\nThe group should understand exactly what to discuss, what to \"\n \"decide on, and how to respond back based on this. \",\n answerer=leader,\n )\n group_response = self.inner_chat_conductor.initiate_dialog(\n chat=self.inner_chat, initial_message=request_for_group\n )\n\n if self.spinner is not None:\n self.spinner.succeed(text=f\"{self.name} concluded their discussion.\")\n if prev_spinner_text is not None:\n self.spinner.start(text=prev_spinner_text)\n messages = self.inner_chat.get_messages()\n group_response_conversation_str = \"\\n\".join(\n [f\"- {message.sender_name}: {message.content}\" for message in messages]\n )\n leader_response_back, _ = get_response(\n query=str(\n StructuredString(\n sections=[\n Section(name=\"External Conversation\", text=conversation_str),\n Section(name=\"Internal Group Conversation\", text=group_response_conversation_str),\n Section(\n name=\"Task\",\n text=\"You are a part of the EXTERNAL CONVERSATION and need to respond back. \"\n \"You and your group have collaborated on a response back for the \"\n \"EXTERNAL CONVERSATION. Please transform the INTERNAL GROUP CONVERSATION into \"\n \"a proper, in-context response back (in your name) for the EXTERNAL CONVERSATION; \"\n \"it should be mainly based on the conclusion of the internal conversation. 
\"\n \"Your response will be sent to the EXTERNAL CONVERSATION verbatim.\",\n ),\n ]\n )\n ),\n answerer=leader,\n )\n\n return leader_response_back\n\n def __str__(self) -> str:\n active_participants = self.inner_chat.get_active_participants()\n\n if len(active_participants) > 0:\n names = [str(p) for p in active_participants]\n return f'{self.name} (Includes: {\", \".join(names)})'\n\n return self.name\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n participants = self.inner_chat.get_active_participants()\n members_str = \"\\n\\n\".join([p.detailed_str(level=level + 1) for p in participants])\n\n return (\n f'{prefix}- Name: {self.name}\\n{prefix} Symbol: {self.symbol}\\n{prefix} Mission: \"{self.mission}\"'\n f\"\\n{members_str}\"\n )"
},
{
"identifier": "LangChainBasedAIChatParticipant",
"path": "chatflock/participants/langchain.py",
"snippet": "class LangChainBasedAIChatParticipant(ActiveChatParticipant):\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(\n self,\n name: str,\n chat_model: BaseChatModel,\n symbol: str = \"🤖\",\n role: str = \"AI Assistant\",\n personal_mission: str = \"Be a helpful AI assistant.\",\n other_prompt_sections: Optional[List[Section]] = None,\n retriever: Optional[BaseRetriever] = None,\n tools: Optional[List[BaseTool]] = None,\n chat_model_args: Optional[Dict[str, Any]] = None,\n spinner: Optional[Halo] = None,\n ignore_group_chat_environment: bool = False,\n include_timestamp_in_messages: bool = False,\n **kwargs: Any,\n ):\n super().__init__(name=name, symbol=symbol, **kwargs)\n\n self.role = role\n self.chat_model = chat_model\n self.chat_model_args = chat_model_args or {}\n self.other_prompt_sections = other_prompt_sections or []\n self.ignore_group_chat_environment = ignore_group_chat_environment\n self.include_timestamp_in_messages = include_timestamp_in_messages\n self.retriever = retriever\n self.tools = tools\n self.spinner = spinner\n self.personal_mission = personal_mission\n\n def create_system_message(self, chat: \"Chat\", relevant_docs: Sequence[Document]) -> str:\n now = datetime.now()\n pretty_datetime = now.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n base_sections = [\n Section(name=\"Current Time\", text=pretty_datetime),\n Section(name=\"Name\", text=self.name),\n Section(name=\"Role\", text=self.role),\n Section(name=\"Personal Mission\", text=self.personal_mission),\n Section(\n name=\"Additional Context for Response\",\n text=\"None\"\n if len(relevant_docs) == 0\n else \"The following documents may be relevant for your response, only use \"\n \"them for context for a better response, if applicable\",\n sub_sections=[\n Section(name=f\"Document {i + 1}\", text=f\"```{doc.page_content}```\")\n for i, doc in enumerate(relevant_docs)\n ],\n ),\n Section(\n name=\"Response Message Format\",\n list=[\n \"Your response should be the message you want to send to the group chat as your own name, \"\n \"role, and personal mission.\",\n \"Must not include any prefix (e.g., timestamp, sender name, etc.).\",\n \"Response must be a message as will be shown in the chat (timestamp and sender name are \"\n \"system-generated for you).\",\n ],\n sub_sections=[\n Section(name=\"Well-Formatted Chat Response Examples\", list=['\"Hello, how are you?\"']),\n Section(\n name=\"Badly-Formatted Chat Response Examples\",\n list=[\n (\n '\"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else '\"John: Hello, how are you?\"'\n ),\n ],\n ),\n ],\n ),\n ]\n\n active_participants = chat.get_active_participants()\n if self.ignore_group_chat_environment:\n system_message = StructuredString(sections=[*base_sections, *self.other_prompt_sections])\n else:\n system_message = StructuredString(\n sections=[\n *base_sections,\n Section(\n name=\"Chat\",\n sub_sections=[\n Section(name=\"Name\", text=chat.name or \"No name provided. Just a general chat.\"),\n Section(\n name=\"Participants\",\n text=\"\\n\".join(\n [\n f'- {str(p)}{\" -> This is you.\" if p.name == self.name else \"\"}'\n for p in active_participants\n ]\n ),\n ),\n Section(\n name=\"Guidelines\",\n list=[\n \"Your personal mission is the most important thing to you. 
You should always \"\n \"prioritize it.\",\n \"If a chat goal is provided, you should still follow your personal mission but \"\n \"in a way that helps the group achieve the chat goal.\",\n \"If you are the only participant in the chat, you should act as if the chat is now \"\n \"a scratch pad for you to write down your thoughts, ideas, and work on your \"\n \"mission by yourself. \"\n \"In the messages do not refer to another entity, but rather to yourself \"\n \"(I instead of You); the messages should read and sound like \"\n \"your internal thoughts and should be succinct, unless they are concrete work \"\n \"(for example, implementing something, calculating things, etc.). \"\n \"You have all the time in the world to build your thoughts, ideas, and do the \"\n \"work needed. The chat is now your place to think and iterate on your mission and \"\n \" achieve it.\",\n ],\n ),\n Section(\n name=\"Rules\",\n list=[\n \"You do not have to respond directly to the one who sent you a message. You can respond \"\n \"to anyone in the group chat.\",\n \"You cannot have private conversations with other participants. Everyone can see all \"\n \"messages sent by all other participants.\",\n ],\n ),\n Section(\n name=\"Previous Chat Messages\",\n list=[\n \"Messages are prefixed by a timestamp and the sender's name (could also be everyone). \",\n \"The prefix is for context only; it's not actually part of the message they sent. \",\n (\n 'Example: \"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else 'Example: \"John: Hello, how are you?\"'\n ),\n \"Some messages could have been sent by participants who are no longer a part of this \"\n \"conversation. Use their contents for context only; do not talk to them.\",\n \"In your response only include the message without the prefix.\",\n \"If you are the only participant in the chat, the previous chat messages are your \"\n \" memories or internal thoughts instead.\",\n ],\n ),\n ],\n ),\n *self.other_prompt_sections,\n ]\n )\n\n return str(system_message)\n\n def chat_messages_to_chat_model_messages(\n self, chat_messages: Sequence[ChatMessage], active_participants: Sequence[ActiveChatParticipant]\n ) -> List[BaseMessage]:\n messages: List[BaseMessage] = []\n for i, message in enumerate(chat_messages):\n if self.include_timestamp_in_messages:\n pretty_datetime = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n content = f\"[{pretty_datetime}] \"\n else:\n content = \"\"\n\n if self.ignore_group_chat_environment:\n content += f\"{message.sender_name}: {message.content}\"\n else:\n content += message.content\n\n if message.sender_name == self.name:\n if len(active_participants) > 1 or i == len(active_participants) - 1:\n messages.append(AIMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n\n if len(messages) == 0:\n messages.append(HumanMessage(content=f\"SYSTEM: The chat has started.\"))\n\n return messages\n\n def respond_to_chat(self, chat: Chat) -> str:\n if self.spinner is not None:\n self.spinner.start(text=f\"{str(self)} is thinking...\")\n\n chat_messages = chat.get_messages()\n\n if self.retriever is not None and len(chat_messages) > 0:\n relevant_docs = self.get_relevant_docs(messages=chat_messages)\n else:\n relevant_docs = []\n\n system_message = self.create_system_message(chat=chat, relevant_docs=relevant_docs)\n\n active_participants = chat.get_active_participants()\n all_messages = 
self.chat_messages_to_chat_model_messages(chat_messages, active_participants)\n all_messages = [SystemMessage(content=system_message), *all_messages]\n\n message_content = self.execute_messages(messages=all_messages)\n\n if self.spinner is not None:\n self.spinner.stop()\n\n potential_prefix = f\"{self.name}:\"\n if message_content.startswith(potential_prefix):\n message_content = message_content[len(potential_prefix) :].strip()\n\n return message_content\n\n def get_relevant_docs(self, messages: Sequence[ChatMessage]) -> List[Document]:\n if self.retriever is None:\n return []\n\n return self.retriever.get_relevant_documents(query=messages[-1].content)\n\n def execute_messages(self, messages: Sequence[BaseMessage]) -> str:\n return execute_chat_model_messages(\n messages=messages,\n chat_model=self.chat_model,\n tools=self.tools,\n spinner=self.spinner,\n chat_model_args=self.chat_model_args,\n )\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n\n tool_names = \", \".join([tool.name for tool in self.tools or []])\n if tool_names == \"\":\n tool_names = \"None\"\n\n return (\n f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\\n\"\n f'{prefix} Personal Mission: \"{self.personal_mission}\"\\n{prefix} Tools: {tool_names}'\n )"
},
{
"identifier": "UserChatParticipant",
"path": "chatflock/participants/user.py",
"snippet": "class UserChatParticipant(ActiveChatParticipant):\n def __init__(self, name: str = \"User\", role: str = \"User\", symbol: str = \"👤\", **kwargs: Any):\n super().__init__(name, messages_hidden=True, **kwargs)\n\n self.role = role\n self.symbol = symbol\n\n def respond_to_chat(self, chat: Chat) -> str:\n return input(f\"{self.symbol} ({self.name}): \")\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n return f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\""
},
{
"identifier": "TerminalChatRenderer",
"path": "chatflock/renderers/terminal.py",
"snippet": "class TerminalChatRenderer(ChatRenderer):\n def __init__(self, print_timestamps: bool = False):\n self.print_timestamps = print_timestamps\n\n def render_new_chat_message(self, chat: Chat, message: ChatMessage) -> None:\n if chat.hide_messages:\n return\n\n pretty_timestamp_with_date = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n sender = chat.get_active_participant_by_name(message.sender_name)\n if sender is None:\n symbol = \"❓\"\n\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {symbol} {message.sender_name}: {message.content}\")\n else:\n print(f\"{symbol} {message.sender_name}: {message.content}\")\n else:\n if sender.messages_hidden:\n return\n\n if chat.name is None:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {str(sender)}: {message.content}\")\n else:\n print(f\"{str(sender)}: {message.content}\")\n else:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {chat.name} > {str(sender)}: {message.content}\")\n else:\n print(f\"{chat.name} > {str(sender)}: {message.content}\")"
},
{
"identifier": "create_chat_model",
"path": "examples/common.py",
"snippet": "def create_chat_model(\n model: str = \"gpt-4-1106-preview\",\n temperature: float = 0.0,\n cache_db_file_path: Optional[str] = \"output/llm_cache.db\",\n) -> BaseChatModel:\n if cache_db_file_path is not None:\n Path(cache_db_file_path).parent.mkdir(parents=True, exist_ok=True)\n\n set_llm_cache(SQLiteCache(database_path=cache_db_file_path))\n\n chat_model = ChatOpenAI(temperature=temperature, model=model)\n\n return chat_model"
}
] | import typer
from dotenv import load_dotenv
from halo import Halo
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors import LangChainBasedAIChatConductor, RoundRobinChatConductor
from chatflock.participants.group import GroupBasedChatParticipant
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import TerminalChatRenderer
from examples.common import create_chat_model | 8,440 |
def manual_hierarchical_participant(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
comedy_team = GroupBasedChatParticipant(
group_name="Comedy Team",
mission="Collaborate on funny humour-filled responses based on the original request for the user",
|
def manual_hierarchical_participant(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
comedy_team = GroupBasedChatParticipant(
group_name="Comedy Team",
mission="Collaborate on funny humour-filled responses based on the original request for the user", | chat=Chat( | 1 | 2023-11-12 11:10:58+00:00 | 12k |
CryptoFuzzPy/cryptofuzz | cryptofuzz/Wallet.py | [
{
"identifier": "Convertor",
"path": "cryptofuzz/utils.py",
"snippet": "class Convertor:\n def __init__(self):\n super().__init__()\n self.gen = Generator()\n \n def double_sha256(self, data):\n return hashlib.sha256(hashlib.sha256(data).digest()).digest()\n \n def mne_to_seed(self, mnemonic, password=\"\"):\n salt = (\"mnemonic\" + password).encode('utf-8')\n seed = hashlib.pbkdf2_hmac('sha512', mnemonic.encode('utf-8'), salt, 2048)\n return seed[:32]\n\n def mne_to_bytes(self, mnemonic):\n return self.mne_to_seed(mnemonic)\n\n def mne_to_hex(self, mnemonic):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_hex(seed)\n\n def mne_to_wif(self, mnemonic, compress: bool = False):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_wif(seed, compress)\n\n def mne_to_int(self, mnemonic):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_int(seed)\n\n def mne_to_xpub(self, mnemonic):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_xpub(seed)\n\n def mne_to_xprv(self, mnemonic):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_xprv(seed)\n\n def mne_to_addr(self, mnemonic, compress: bool = False):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_addr(seed, compress)\n\n def mne_to_binary(self, mnemonic):\n seed = self.mne_to_seed(mnemonic)\n return self.bytes_to_binary(seed)\n\n def bytes_to_mne(self, seed):\n return Mnemonic().to_mnemonic(seed)\n\n def bytes_to_seed(self, seed):\n return hashlib.pbkdf2_hmac('sha512', seed, b'mnemonic', 2048)\n\n def bytes_to_hex(self, seed):\n return binascii.hexlify(self.bytes_to_seed(seed)).decode('utf-8')\n\n def unHexlify(self, h: str):\n return binascii.unhexlify(h)\n \n def hex_to_bytes(self, hexed):\n return binascii.unhexlify(hexed)\n\n def hex_to_mne(self, hexed: str) -> str:\n seed = self.hex_to_bytes(hexed)\n return self.bytes_to_mne(seed)\n\n def hex_to_wif(self, hexed, compress: bool = False) -> str:\n return self.bytes_to_wif(self.hex_to_bytes(hexed), compress)\n\n def hex_to_xprv(self, hexed: str) -> str:\n return self.bytes_to_xprv(self.hex_to_bytes(hexed))\n\n def hex_to_xpub(self, hexed: str) -> str:\n return self.bytes_to_xpub(self.hex_to_bytes(hexed))\n \n def hex_to_int(self, hexed: str) -> int:\n return int(hexed, 16)\n \n def hex_to_pub(self, hexed: str, compress: bool = False) -> bytes:\n if compress:\n return self.bytes_to_public(self.hex_to_bytes(hexed), True)\n else:\n return self.bytes_to_public(self.hex_to_bytes(hexed), False)\n \n def hex_to_addr(self, hexed: str, compress: bool = False) -> str:\n seed = self.hex_to_bytes(hexed)\n if compress:\n return self.bytes_to_addr(seed, True)\n else:\n return self.bytes_to_addr(seed, False)\n\n def hex_to_binary(self, hexed: str) -> str:\n return self.bytes_to_binary(self.hex_to_bytes(hexed))\n \n def bytes_to_hex(self, seed):\n privatekey_int = int.from_bytes(hashlib.sha256(seed).digest(), byteorder='big')\n self.gen.checkValid(privatekey_int)\n pvkByte = privatekey_int.to_bytes(32, byteorder='big')\n return pvkByte.hex()\n \n def bytes_to_int(self, seed) -> int:\n return int.from_bytes(seed, byteorder='big')\n \n def bytes_to_pub(self, seed_bytes: bytes) -> bytes:\n sk = ecdsa.SigningKey.from_string(seed_bytes[:32], curve=ecdsa.SECP256k1)\n vk = sk.get_verifying_key()\n pub = COMPRESSED_PREFIX2 + vk.to_string()[-32:] if vk.to_string()[-1] % 2 == 0 else b'\\x03' + vk.to_string()[-32:]\n return pub\n \n def bytes_to_public(self, seed: bytes, compress: bool = True) -> bytes:\n sk = ecdsa.SigningKey.from_string(seed, curve=ecdsa.SECP256k1)\n vk = sk.get_verifying_key()\n if compress:\n prefix = 
COMPRESSED_PREFIX2 if vk.pubkey.point.y() % 2 == 0 else COMPRESSED_PREFIX\n return prefix + vk.to_string()[:32]\n else:\n return UNCOMPRESSED_PREFIX + vk.to_string()\n \n def bytes_to_xpub(self, seed: bytes, chain_code=None) -> str:\n if chain_code is None:\n chain_code = os.urandom(32) # .hex\n prefix = self.unHexlify(XPUB_PREFIX)\n FINGERPRINT = ZERO_BYTES + ZERO_BYTES\n pub = self.bytes_to_pub(seed)\n xpub = prefix + MAIN_DIGEST + FINGERPRINT + chain_code + pub\n Hash64 = self.double_sha256(xpub)\n xpub += Hash64[:4]\n xpubBase58 = b58encode(xpub)\n return xpubBase58.decode('utf-8')\n \n def bytes_to_mne(self, byte: bytes):\n seed = byte[:32]\n return Mnemonic(\"english\").to_mnemonic(seed)\n \n def bytes_to_binary(self, bytes_: bytes) -> str:\n if len(bytes_) != 32:\n raise ValueError(\"Input bytes should have a length of 32.\")\n \n # Convert each byte to its binary representation and pad with zeros\n return ''.join(format(byte, '08b') for byte in bytes_)\n \n def bytes_to_wif(self, private_key, compress=True):\n if compress:\n EXTENDED_KEY = MAIN_PREFIX + private_key + MAIN_SUFFIX\n else:\n EXTENDED_KEY = MAIN_PREFIX + private_key\n \n DOUBLE_SHA256 = self.double_sha256(EXTENDED_KEY)\n CHECKSUM = DOUBLE_SHA256[:4]\n \n WIF = b58encode(EXTENDED_KEY + CHECKSUM)\n \n return WIF.decode('utf-8')\n \n def bytes_to_xprv(self, bytes_code: bytes) -> str:\n chain_code = bytes.fromhex(ZERO_BASE_NET)\n child_number = struct.pack('>L', 0)\n key = MAIN_DIGEST_RMD160 + bytes_code # 0x00\n \n xprv_main = VERSION_NETWORK + MAIN_DIGEST_RMD160 + FINGERPRINT_RMD160 + child_number + chain_code + key\n decode_main = base58encodeCheck(b\"\", xprv_main)\n return decode_main\n \n def bytes_to_addr(self, seedBytes: bytes, compress: bool = False) -> str:\n if len(seedBytes) != 32:\n seedBytes = seedBytes[:32]\n elif compress:\n pub = self.bytes_to_public(seedBytes, compress=True)\n return self.pub_to_addr(public_key=pub)\n else:\n pub = self.bytes_to_public(seedBytes, compress=False)\n return self.pub_to_addr(public_key=pub)\n \n # ------------------------------------------------------------\n def pass_to_hex(self, passphrase):\n return hashlib.sha256(passphrase.encode()).hexdigest()\n \n def pass_to_bytes(self, passphrase: str) -> bytes:\n return bytes.fromhex(self.pass_to_hex(passphrase))\n \n def pass_to_addr(self, passphrase, compress=False):\n passBytes = self.pass_to_bytes(passphrase)\n sk = ecdsa.SigningKey.from_string(passBytes, curve=ecdsa.SECP256k1)\n vk = sk.verifying_key\n if compress:\n if vk.pubkey.point.y() & 1:\n pub_key = COMPRESSED_PREFIX + vk.to_string()[:32]\n else:\n pub_key = COMPRESSED_PREFIX2 + vk.to_string()[:32]\n else:\n pub_key = UNCOMPRESSED_PREFIX + vk.to_string()\n sha = hashlib.sha256(pub_key).digest()\n ripemd160 = hashlib.new('ripemd160')\n ripemd160.update(sha)\n \n address = base58_check_encode(ripemd160.digest())\n return \"1\" + address\n \n def pass_to_wif(self, passphrase, compress=False):\n passBytes = self.pass_to_bytes(passphrase)\n return self.bytes_to_wif(passBytes, compress)\n \n def pass_to_xprv(self, passphrase):\n return self.bytes_to_xprv(self.pass_to_bytes(passphrase))\n \n # ------------------------------------------------------------\n \n def pub_to_bytes(self, pubkey, compress=True):\n if compress:\n prefix = (COMPRESSED_PREFIX if pubkey.pubkey.point.y() & 1 else COMPRESSED_PREFIX2)\n return prefix + pubkey.pubkey.point.x().to_bytes(32, 'big')\n else:\n point_x = pubkey.pubkey.point.x().to_bytes(32, 'big')\n point_y = pubkey.pubkey.point.y().to_bytes(32, 
'big')\n return UNCOMPRESSED_PREFIX + point_x + point_y\n \n def pub_to_hex(self, pubkey, compress=True):\n return self.pub_to_bytes(pubkey, compress).hex()\n \n def pub_to_addr(self, public_key: bytes) -> str:\n ripemd160 = hashlib.new('ripemd160')\n ripemd160.update(hashlib.sha256(public_key).digest())\n hashed = MAIN_DIGEST_RMD160 + ripemd160.digest()\n checksum = hashlib.sha256(hashlib.sha256(hashed).digest()).digest()[:4]\n address = hashed + checksum\n return b58encode(address).decode('utf-8')\n \n # ------------------------------------------------------------\n \n def wif_to_bytes(self, wif):\n wif_bytes = b58decode(wif)\n isCompress = wif_bytes[-5] == 0x01 if len(wif_bytes) == 38 else False\n return wif_bytes[1:-5] if isCompress else wif_bytes[1:-4]\n\n def wif_to_binary(self, wif: str) -> str:\n pvkBytes = self.wif_to_bytes(wif)\n return self.bytes_to_binary(pvkBytes)\n def wif_to_addr(self, wif: str, compress: bool = False) -> str:\n pvkBytes = self.wif_to_bytes(wif)\n public_key = self.bytes_to_public(pvkBytes, compress)\n address = self.pub_to_addr(public_key)\n return address\n\n def wif_to_int(self, wif): return self.bytes_to_int(self.wif_to_bytes(wif))\n\n def wif_to_hex(self, wif): return self.wif_to_bytes(wif).hex()\n\n def wif_to_mne(self, wif): return self.bytes_to_mne(self.wif_to_bytes(wif))\n\n def wif_to_xprv(self, wif): return self.bytes_to_xprv(self.wif_to_bytes(wif))\n\n def wif_to_xpub(self, wif): return self.bytes_to_xpub(self.wif_to_bytes(wif))\n\n def wif_to_pub(self, wif): return self.bytes_to_public(self.wif_to_bytes(wif)).hex()\n # ------------------------------------------------------------\n \n def xprv_to_bytes(self, xprv: str):\n if not xprv.startswith(\"xprv\") or len(xprv) <= 4:\n raise ValueError(\"Invalid xprv format.\")\n xprv58 = xprv[4:]\n xprvBytes = base58decode(xprv58)\n return xprvBytes[:32]\n \n def xprv_to_addr(self, xprv, compress: bool = False):\n seed = self.xprv_to_bytes(xprv)\n if compress:\n pub = self.bytes_to_public(seed, True)\n return self.pub_to_addr(pub)\n else:\n pub = self.bytes_to_public(seed, False)\n return self.pub_to_addr(pub)\n \n def xprv_to_pub(self, xprv, compress: bool = False):\n seed = self.xprv_to_bytes(xprv)\n if compress:\n return self.bytes_to_public(seed, True)\n else:\n return self.bytes_to_public(seed, False)\n \n def xprv_to_wif(self, xprv, compress: bool = False):\n seed = self.xprv_to_bytes(xprv)\n if compress:\n return self.bytes_to_wif(seed, True)\n else:\n return self.bytes_to_wif(seed, False)\n \n def xprv_to_mne(self, xprv):\n seed = self.xprv_to_bytes(xprv)\n return self.bytes_to_mne(seed)\n \n # ------------------------------------------------------------\n \n def binary_to_bytes(self, bin_str: str) -> bytes:\n if len(bin_str) != 256:\n raise ValueError(\"The binary string must have 256 characters.\")\n chunks = [bin_str[i:i + 8] for i in range(0, len(bin_str), 8)]\n return bytes([int(chunk, 2) for chunk in chunks])\n \n def int_to_bytes(self, int_dec: int) -> bytes:\n return int_dec.to_bytes(32, 'big')\n \n def int_to_hex(self, int_dec: int) -> str:\n return \"%064x\" % int_dec\n\n def int_to_mnemonic(self, int_dec: int) -> str:\n return self.bytes_to_mne(self.int_to_bytes(int_dec))\n\n def int_to_wif(self, int_dec: int, compress: bool = False) -> str:\n return self.bytes_to_wif(self.int_to_bytes(int_dec), compress)\n\n def int_to_xprv(self, int_dec: int) -> str:\n return self.bytes_to_xprv(self.int_to_bytes(int_dec))\n\n def int_to_xpub(self, int_dec: int) -> bytes:\n \"\"\"\n Convert int decimal 
to public key (``bytes``).\n\n :param int_dec:\n :type int_dec: int\n :return:\n :rtype: bytes\n\n \"\"\"\n return self.bytes_to_xpub(self.int_to_bytes(int_dec))\n\n def int_to_addr(self, int_dec: int, compress: bool = False) -> str:\n \"\"\"\n Convert int decimal to compress & uncompress address (``str``).\n\n :param int_dec:\n :type int_dec: int\n :param compress:\n :type compress: bool\n :return:\n :rtype: str\n \"\"\"\n return self.bytes_to_addr(self.int_to_bytes(int_dec), compress)\n\n def int_to_binary(self, int_dec: int) -> str:\n return self.bytes_to_binary(self.int_to_bytes(int_dec))\n # ------------------------------------------------------------"
},
{
"identifier": "Generator",
"path": "cryptofuzz/utils.py",
"snippet": "class Generator:\n def __init__(self):\n super().__init__()\n \n def checkValid(self, key: int) -> bool:\n if 0 < key < MAX_PRIVATE_KEY:\n return True\n else:\n raise ValueError(f\"Secret Scalar Must be greater than 0 and less than {MAX_PRIVATE_KEY}.\")\n \n def generate_private_key(self) -> str:\n randkey = \"\".join(random.choice(\"0123456789abcdef\") for _ in range(64))\n if self.checkValid(int(randkey, 16)):\n return randkey\n else:\n return self.generate_private_key()\n \n def generate_xprv(self):\n return \"xprv\" + binascii.hexlify(os.urandom(32)).decode('utf-8')\n \n def generate_decimal(self) -> int: return random.randint(0, MAX_PRIVATE_KEY)\n def generate_binary(self) -> str:\n return \"\".join(random.choice(\"01\") for _ in range(256))\n \n def generate_entropy(self, entropy_bits=256):\n entropy = os.urandom(entropy_bits // 8)\n checksum = hashlib.sha256(entropy).digest()[0]\n entropy_with_checksum = entropy + bytes([checksum])\n return entropy_with_checksum\n \n def generate_mnemonic(self, size: int) -> str:\n characters = re.findall('[A-Z][a-z]+', BIP39)\n return \" \".join(random.choices(characters, k=size)).lower()"
},
{
"identifier": "Axe",
"path": "cryptofuzz/hd.py",
"snippet": "class Axe:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Axe Format Type Addresses.\n :param hexed:\n :type hexed:\n :rtype str:\n :return: Str - address\n\n -------------------------------------------------------------\n\n >>> Axe_ = Axe()\n >>> privatekey = \"e3b0c44298fc1c149...................\"\n >>> Axe_address = Axe_.hex_addr(privatekey)\n\n -------------------------------------------------------------\n \n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(AXE)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Bitcoin",
"path": "cryptofuzz/hd.py",
"snippet": "class Bitcoin:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str, Type: str = 'p2pkh') -> str:\n \"\"\"\n Convert Private key Hex To All Bitcoin Format Type Addresses, Type: `p2pkh`, `p2sh`, `p2wpkh`, `p2wsh`, `p2wpkh_p2sh`, `p2wsh_p2sh`.\n :param hexed:\n :param Type:\n :rtype str:\n :return address:\n\n\n -----------------------------------------------------------------------------------------------\n\n >>> btc = Bitcoin()\n >>> privatekey = \"0A97965.........0102F6A45517\" # example Private Key\n >>> p2pkh_addr = btc.hex_addr(privatekey, Type='p2pkh')\n >>> p2sh_addr = btc.hex_addr(privatekey, Type='p2sh')\n >>> p2wpkh_addr = btc.hex_addr(privatekey, Type='p2wpkh')\n >>> p2wsh_addr = btc.hex_addr(privatekey, Type='p2wsh')\n >>> p2wpkh_p2sh_addr = btc.hex_addr(privatekey, Type='p2wpkh_p2sh')\n >>> p2wsh_p2sh_addr = btc.hex_addr(privatekey, Type='p2wsh_p2sh')\n\n ---------------------------------------------------------------------------------------------\n\n\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(BTC)\n hd.from_private_key(hexed)\n if Type == 'p2pkh':\n return hd.p2pkh_address()\n elif Type == 'p2sh':\n return hd.p2sh_address()\n elif Type == 'p2wpkh':\n return hd.p2wpkh_address()\n elif Type == 'p2wsh':\n return hd.p2wsh_address()\n elif Type == 'p2wpkh_p2sh':\n return hd.p2wpkh_in_p2sh_address()\n elif Type == 'p2wsh_p2sh':\n return hd.p2wsh_in_p2sh_address()\n else:\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "BitcoinGold",
"path": "cryptofuzz/hd.py",
"snippet": "class BitcoinGold:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str, Type: str = \"p2pkh\") -> str:\n \"\"\"\n\n Convert Private key Hex To All BitcoinGold Format Type Address , Type: `p2pkh`, `p2sh`, `p2wpkh`, `p2wsh`, `p2wpkh_p2sh`, `p2wsh_p2sh`.\n\n :param hexed:\n :type hexed: Str.\n :param Type:\n :type Type: Str.\n :rtype: Str.\n :return address:\n\n\n --------------------------------------------------------------\n\n >>> btg = BitcoinGold()\n >>> privatekey = \"0A9796542F1030931E317...............960DC79C48D20102F6A45517\"\n >>> p2pkh_address = btg.hex_addr(privatekey, \"p2pkh\")\n >>> p2sh_address = btg.hex_addr(privatekey, \"p2sh\")\n >>> p2wpkh_address = btg.hex_addr(privatekey, \"p2wpkh\")\n >>> p2wsh_address = btg.hex_addr(privatekey, \"p2wsh\")\n >>> p2wpkh_in_p2sh_address = btg.hex_addr(privatekey, \"p2wpkh_p2sh\")\n >>> p2wsh_in_p2sh_address = btg.hex_addr(privatekey, \"p2wsh_p2sh\")\n\n --------------------------------------------------------------\n\n\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(BTG)\n hd.from_private_key(hexed)\n if Type == \"p2pkh\":\n return hd.p2pkh_address()\n elif Type == \"p2sh\":\n return hd.p2sh_address()\n elif Type == \"p2wpkh\":\n return hd.p2wpkh_address()\n elif Type == \"p2wsh\":\n return hd.p2wsh_address()\n elif Type == \"p2wpkh_p2sh\":\n return hd.p2wpkh_in_p2sh_address()\n elif Type == \"p2wsh_p2sh\":\n return hd.p2wsh_in_p2sh_address()\n else:\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Dash",
"path": "cryptofuzz/hd.py",
"snippet": "class Dash:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Dash Address .\n :param hexed:\n :rtype str:\n :return: Str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(DASH)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "DigiByte",
"path": "cryptofuzz/hd.py",
"snippet": "class DigiByte:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To DigiByte Address.\n\n :param hexed:\n :rtype str:\n :return: Str - address\n\n\n --------------------------------------------------------------\n\n >>> dgb = DigiByte()\n >>> privatekey = \"0A97965...A45517\" # example Private Key\n >>> digibyte_address = dgb.hex_addr(privatekey)\n\n --------------------------------------------------------------\n\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(DGB)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Dogecoin",
"path": "cryptofuzz/hd.py",
"snippet": "class Dogecoin:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str, Type: str = 'p2pkh') -> str:\n \"\"\"\n Generate Private key Hex Address To All Dogecoin Format Type Address , Type: `p2pkh`, `p2sh`.\n\n :param hexed:\n :type hexed: str\n :param Type:\n :type Type: str\n :rtype: str\n :return: str - address\n\n\n --------------------------------------------------------------\n\n >>> doge = Dogecoin()\n >>> privatekey = \"0A9796542F1030...02F6A45517\" # example Private Key\n >>> p2pkh_doge_addr = doge.hex_addr(privatekey, 'p2pkh')\n >>> p2sh_doge_addr = doge.hex_addr(privatekey, 'p2sh')\n\n --------------------------------------------------------------\n\n \"\"\"\n\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(DOGE)\n hd.from_private_key(hexed)\n if Type == 'p2pkh':\n return hd.p2pkh_address()\n elif Type == 'p2sh':\n return hd.p2sh_address()\n else:\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Ethereum",
"path": "cryptofuzz/hd.py",
"snippet": "class Ethereum:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Ethereum Format Type Address .\n :param hexed:\n :rtype str:\n :return: str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(ETH)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Litecoin",
"path": "cryptofuzz/hd.py",
"snippet": "class Litecoin:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str, Type: str = 'p2pkh') -> str:\n \"\"\"\n\n ------------------------------------------\n Convert Private key Hex To All Litecoin Format Type Address , Type: `p2pkh`, `p2sh`, `p2wpkh`, `p2wsh`, `p2wpkh_p2sh`, `p2wsh_p2sh`.\n :param hexed:\n :type hexed: str.\n :param Type:\n :type Type: str.\n :returns: address.\n\n ------------------------------------------\n\n >>> ltc = Litecoin()\n >>> privatekey = \"e3b0c44298fc1c149...................\"\n >>> p2pkh_address = ltc.hex_addr(privatekey, 'p2pkh')\n >>> p2sh_address = ltc.hex_addr(privatekey, 'p2sh')\n >>> p2wpkh_address = ltc.hex_addr(privatekey, 'p2wpkh')\n >>> p2wsh_address = ltc.hex_addr(privatekey, 'p2wsh')\n >>> p2wpkh_p2sh_address = ltc.hex_addr(privatekey, 'p2wpkh_p2sh')\n >>> p2wsh_p2sh_address = ltc.hex_addr(privatekey, 'p2wsh_p2sh')\n\n ------------------------------------------\n\n\n\n \"\"\"\n\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(LTC)\n hd.from_private_key(hexed)\n if Type == 'p2pkh':\n return hd.p2pkh_address()\n elif Type == 'p2sh':\n return hd.p2sh_address()\n elif Type == 'p2wpkh':\n return hd.p2wpkh_address()\n elif Type == 'p2wsh':\n return hd.p2wsh_address()\n elif Type == 'p2wpkh_p2sh':\n return hd.p2wpkh_in_p2sh_address()\n elif Type == 'p2wsh_p2sh':\n return hd.p2wsh_in_p2sh_address()\n else:\n return hd.p2pkh_address()\n\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Qtum",
"path": "cryptofuzz/hd.py",
"snippet": "class Qtum:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Qtum Format Type Address .\n :param hexed:\n :rtype str:\n :return: str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(QTUM)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Ravencoin",
"path": "cryptofuzz/hd.py",
"snippet": "class Ravencoin:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Ravencoin Format Type Address .\n :param hexed:\n :rtype str:\n :return: str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(RVN)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Tron",
"path": "cryptofuzz/hd.py",
"snippet": "class Tron:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n\n \"\"\"\n Convert Private key Hex To All Tron Format Type Address .\n :param hexed:\n :rtype str:\n :return: str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(TRX)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
},
{
"identifier": "Zcash",
"path": "cryptofuzz/hd.py",
"snippet": "class Zcash:\n def __int__(self):\n super().__init__()\n\n def hex_addr(self, hexed: str) -> str:\n \"\"\"\n Convert Private key Hex To All Zcash Format Type Address .\n :param hexed:\n :rtype str:\n :return: str - address\n \"\"\"\n if is_valid_hex(hexed):\n hd: HD_W = HD_W(ZEC)\n hd.from_private_key(hexed)\n return hd.p2pkh_address()\n else:\n ValueError(\"hex format invalid check again.[format: hex][64 length]\")"
}
] | import os
from . import Generator, Convertor
from . import (
Bitcoin, BitcoinGold, Dash, DigiByte, Dogecoin, Ethereum, Litecoin, Qtum, Ravencoin, Tron, Zcash, Axe
) | 9,593 | def XPRV_To_Decimal(xprv: str) -> int: return convertor.bytes_to_int(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def PrivateKey_To_Bitcoin_Addr(privatekey: str, Type: str = 'p2pkh') -> str:
"""
    Convert a private key (hex) to any supported Bitcoin address type. Type: p2pkh, p2sh, p2wpkh, p2wsh, p2wpkh_p2sh, p2wsh_p2sh.
:param privatekey:
:type privatekey: str
:param Type:
:type Type: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Bitcoin_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> p2pkh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2pkh')
>>> p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2sh')
>>> p2wpkh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wpkh')
>>> p2wsh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh')
>>> p2wpkh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wpkh_p2sh')
>>> p2wsh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh_p2sh')
--------------------------------------------------------
"""
BTC = Bitcoin()
if Type == 'p2pkh':
return BTC.hex_addr(privatekey, 'p2pkh')
elif Type == 'p2sh':
return BTC.hex_addr(privatekey, 'p2sh')
elif Type == 'p2wpkh':
return BTC.hex_addr(privatekey, 'p2wpkh')
elif Type == 'p2wsh':
return BTC.hex_addr(privatekey, 'p2wsh')
elif Type == 'p2wpkh_p2sh':
return BTC.hex_addr(privatekey, 'p2wpkh_p2sh')
elif Type == 'p2wsh_p2sh':
return BTC.hex_addr(privatekey, 'p2wsh_p2sh')
else:
return BTC.hex_addr(privatekey, 'p2pkh')
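# Editorial note (sketch, not part of the original module): every branch above forwards
# the Type string to Bitcoin.hex_addr unchanged, so a membership check would behave the
# same; the constant and function names below are hypothetical.
# _BTC_ADDR_TYPES = {'p2pkh', 'p2sh', 'p2wpkh', 'p2wsh', 'p2wpkh_p2sh', 'p2wsh_p2sh'}
# def PrivateKey_To_Bitcoin_Addr_sketch(privatekey: str, Type: str = 'p2pkh') -> str:
#     return Bitcoin().hex_addr(privatekey, Type if Type in _BTC_ADDR_TYPES else 'p2pkh')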
# ----------------------------------------------------------
def PrivateKey_To_Ethereum_Addr(privatekey: str) -> str:
"""
Convert Private Key To Ethereum Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Ethereum_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Ethereum_Addr(Privatekey)
--------------------------------------------------------
"""
ETH = Ethereum()
return ETH.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_BitcoinGold_Addr(privatekey: str) -> str:
"""
Convert Private Key To Bitcoin Gold Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_BitcoinGold_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_BitcoinGold_Addr(Privatekey)
--------------------------------------------------------
"""
BTG = BitcoinGold()
return BTG.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_Dash_Addr(privatekey: str) -> str:
"""
Convert Private Key To Dash Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Dash_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Dash_Addr(Privatekey)
--------------------------------------------------------
"""
| # programmer and owner mmdrza.com
# ----------------------------------------------------------
convertor = Convertor()
generator = Generator()
# ----------------------------------------------------------
def getPrivateKey() -> str:
"""
    Generate a random private key (hex); the generator keeps drawing until the value is a valid secp256k1 scalar.
:return private key:
:rtype str:
---------------------------------------------------
>>> Privatekey = getPrivateKey()
---------------------------------------------------
"""
return generator.generate_private_key()
# ----------------------------------------------------------
def getMnemonic(size: int = 12) -> str:
"""
    Generate a random standard BIP39 mnemonic phrase.
:param size:
:type size: Int
:return mnemonic:
:rtype str:
--------------------------------------------------
>>> Mnemonic = getMnemonic()
--------------------------------------------------
"""
return generator.generate_mnemonic(size=size)
# ----------------------------------------------------------
def getBinary() -> str:
"""
    Generate a random 256-character binary string (256 bits).
:rtype str:
:return binary:
-------------------------------------------------
>>> Binary = getBinary()
------------------------------------------------
"""
return generator.generate_binary()
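# Editorial note (sketch, not part of the original module): the 256-character string from
# getBinary() pairs with Convertor.binary_to_bytes, which requires exactly 256 bits.
# binary = getBinary()
# seed_bytes = convertor.binary_to_bytes(binary)  # 32 bytes; raises ValueError otherwise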
# ----------------------------------------------------------
def getRootKey() -> str:
"""
    Generate a random root key (xprv).
:rtype str:
:return root key:
------------------------------------------------
>>> RootKey = getRootKey()
------------------------------------------------
"""
return generator.generate_xprv()
# -------------------------------------------------------------------
def getBytes() -> bytes: return os.urandom(32)
# -------------------------------------------------------------------
def getDecimal() -> int: return generator.generate_decimal()
# -------------------------------------------------------------------
def PrivateKey_To_Addr(hexed: str, compress: bool = False) -> str:
"""
    Convert a private key (hex) to a compressed or uncompressed address.
:param hexed:
:type hexed: str
:param compress:
:type compress: bool
:return address:
:rtype str:
----------------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> address_compress = PrivateKey_To_Addr(privatekey, True)
>>> address_uncompress = PrivateKey_To_Addr(privatekey, False)
----------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
if compress:
return convertor.bytes_to_addr(seed, True)
else:
return convertor.bytes_to_addr(seed, False)
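# Editorial sketch (assumption: mirrors the Convertor helpers shown in this file's context;
# the helper name below is hypothetical and not exported): the address is the Base58Check
# hash of the secp256k1 public key derived from the 32-byte secret.
# def _addr_from_hex_sketch(hexed: str, compress: bool = False) -> str:
#     seed = convertor.hex_to_bytes(hexed)             # 32-byte secret
#     pub = convertor.bytes_to_public(seed, compress)  # SEC-encoded public key
#     return convertor.pub_to_addr(pub)                # P2PKH Base58Check address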
# ----------------------------------------------------------
def PrivateKey_To_Wif(hexed: str, compress: bool = False) -> str:
"""
    Convert a private key (hex) to a compressed or uncompressed WIF.
:param hexed:
:type hexed: str
:param compress:
:type compress: bool
:return wif:
:rtype str:
------------------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> wif_compress = PrivateKey_To_Wif(privatekey, True)
>>> wif_uncompress = PrivateKey_To_Wif(privatekey, False)
------------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
if compress:
return convertor.bytes_to_wif(seed, True)
else:
return convertor.bytes_to_wif(seed, False)
# ----------------------------------------------------------
def PrivateKey_To_PublicKey(hexed: str, compress: bool = False) -> str:
"""
    Convert a private key (hex) to a compressed or uncompressed public key.
:param hexed:
:type hexed: str
:param compress:
:type compress: bool
:return public key:
:rtype str:
------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> publickey_compress = PrivateKey_To_PublicKey(privatekey, True)
>>> publickey_uncompress = PrivateKey_To_PublicKey(privatekey, False)
------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def PrivateKey_To_Mnemonic(hexed: str) -> str:
"""
Convert Private key Hex To Mnemonic.
:param hexed:
:type hexed: str
:return mnemonic:
:rtype str:
--------------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> mnemonic = PrivateKey_To_Mnemonic(privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_mne(seed)
# ----------------------------------------------------------
def PrivateKey_To_Byte(hexed: str) -> bytes:
"""
    Convert a private key (hex) to bytes.
:param hexed:
:type hexed: Str.
:return byte:
:rtype bytes:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> byte = PrivateKey_To_Byte(Privatekey)
--------------------------------------------------------
"""
return convertor.hex_to_bytes(hexed)
# ----------------------------------------------------------
def PrivateKey_To_Binary(hexed: str) -> str:
"""
Convert Private key Hex To Binary.
:param hexed:
    :type hexed: str
:return binary:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> binary = PrivateKey_To_Binary(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_binary(seed)
# ----------------------------------------------------------
def PrivateKey_To_Decimal(hexed: str) -> int:
"""
Convert Private key Hex To Decimal.
:param hexed:
    :type hexed: str
:return decimal:
:rtype int:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> decimal = PrivateKey_To_Decimal(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_int(seed)
# ----------------------------------------------------------
def PrivateKey_To_XPRV(hexed: str) -> str:
"""
Convert Private key Hex To XPRV.
:param hexed:
    :type hexed: str
:return xprv:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> xprv = PrivateKey_To_XPRV(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_xprv(seed)
# ----------------------------------------------------------
def PrivateKey_To_CompressAddr(hexed: str) -> str:
"""
Convert Private key Hex To Compress Address.
:param hexed:
    :type hexed: str
:return address:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> address_compress = PrivateKey_To_CompressAddr(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_addr(seed, True)
# ----------------------------------------------------------
def PrivateKey_To_UncompressAddr(hexed: str) -> str:
"""
Convert Private key Hex To UnCompress Address.
:param hexed:
    :type hexed: str
:return address:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> address_uncompress = PrivateKey_To_UncompressAddr(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def PrivateKey_To_XPUB(hexed: str) -> str:
"""
Convert Private key Hex To XPUB.
:param hexed:
    :type hexed: str
:return xpub:
:rtype str:
--------------------------------------------------------
>>> Privatekey = "0A97965...A45517" # example Private Key
>>> xpub = PrivateKey_To_XPUB(Privatekey)
--------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
return convertor.bytes_to_xpub(seed)
# ----------------------------------------------------------
def Bytes_To_PrivateKey(byte: bytes) -> str:
"""
Convert Byte To Private Key.
:param byte:
:type byte: Bytes
:return private key:
:rtype str:
--------------------------------------------------------
    >>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
    >>> privatekey = Bytes_To_PrivateKey(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_hex(byte)
# ----------------------------------------------------------
def Bytes_To_Address(seed: bytes, compress: bool = False):
"""
Convert Bytes To Compressed and Uncompressed Address.
:param seed:
:type seed: Bytes
:param compress:
:type compress: bool
:return address:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> address_compress = Bytes_To_Address(seedBytes, True)
>>> address_uncompress = Bytes_To_Address(seedBytes, False)
--------------------------------------------------------
"""
if compress:
return convertor.bytes_to_addr(seedBytes=seed, compress=True)
else:
return convertor.bytes_to_addr(seedBytes=seed, compress=False)
# ----------------------------------------------------------
def Bytes_To_Mnemonic(seed: bytes) -> str:
"""
Convert Bytes To Mnemonic.
:param seed:
:type seed: Bytes
:return mnemonic:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> mnemonic = Bytes_To_Mnemonic(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_mne(seed)
# ----------------------------------------------------------
def Bytes_To_XPRV(seed: bytes) -> str:
"""
Convert Bytes To XPRV.
:param seed:
:type seed: Bytes
:return xprv:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> xprv = Bytes_To_XPRV(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_xprv(seed)
# ----------------------------------------------------------
def Bytes_To_Binary(seed: bytes):
"""
Convert Bytes To Binary.
:param seed:
:type seed: Bytes
:return binary:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> binary = Bytes_To_Binary(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_binary(seed)
# ----------------------------------------------------------
def Bytes_To_PublicKey(seed: bytes, compress: bool = False):
"""
Convert Bytes To Public Key Compressed and Uncompressed.
:param seed:
:type seed: Bytes
:param compress:
:type compress: bool
:return public:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> public_compress = Bytes_To_PublicKey(seedBytes, True)
>>> public_uncompress = Bytes_To_PublicKey(seedBytes, False)
--------------------------------------------------------
"""
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def Bytes_To_Compress_Addr(seed: bytes) -> str:
"""
Convert Bytes To Compressed Address.
:param seed:
:type seed: Bytes
:return address:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> address_compress = Bytes_To_Compress_Addr(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_addr(seed, True)
# ----------------------------------------------------------
def Bytes_To_Uncompress_Addr(seed: bytes) -> str:
"""
Convert Bytes To Uncompressed Address.
:param seed:
:type seed: Bytes
:return address:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> address_uncompress = Bytes_To_Uncompress_Addr(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def Bytes_To_Decimal(seed: bytes):
"""
Convert Bytes To Decimal.
:param seed:
:type seed: Bytes
:return decimal:
:rtype int:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> decimal = Bytes_To_Decimal(seedBytes)
--------------------------------------------------------
"""
return convertor.bytes_to_int(seed)
# ----------------------------------------------------------
def Bytes_To_XPUB(seed: bytes) -> str:
return convertor.bytes_to_xpub(seed)
# ----------------------------------------------------------
def Bytes_To_Wif(seed: bytes, compress: bool = False) -> str:
"""
Convert Bytes To Wif Compressed and UnCompressed.
:param seed:
:type seed: Bytes
:param compress:
:type compress: bool
:return wif:
:rtype str:
--------------------------------------------------------
>>> seedBytes = b"\x00\x00\x00\x00...\x00\x00\x00\x00\x00\x00\x00\x00" # example seed
>>> wif_compress = Bytes_To_Wif(seedBytes, True)
>>> wif_uncompress = Bytes_To_Wif(seedBytes, False)
--------------------------------------------------------
"""
if compress:
return convertor.bytes_to_wif(seed, True)
else:
return convertor.bytes_to_wif(seed, False)
# ----------------------------------------------------------
def Mnemonic_To_Bytes(mnemonic: str) -> bytes:
return convertor.mne_to_seed(mnemonic=mnemonic)
# ----------------------------------------------------------
def Mnemonic_To_PrivateKey(mnemonic: str) -> str:
seed = convertor.mne_to_seed(mnemonic=mnemonic)
return convertor.bytes_to_hex(seed=seed)
# ----------------------------------------------------------
def Mnemonic_To_PublicKey(mnemonic: str, compress: bool = False) -> str:
    seed = convertor.mne_to_seed(mnemonic=mnemonic)
    # return the public key hex itself; use Mnemonic_To_Addr for the derived address
    if compress:
        return convertor.bytes_to_public(seed, True).hex()
    else:
        return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def Mnemonic_To_Decimal(mnemonic: str):
seed = convertor.mne_to_seed(mnemonic=mnemonic)
return convertor.bytes_to_int(seed)
# ----------------------------------------------------------
def Mnemonic_To_Binary(mnemonic: str):
seed = convertor.mne_to_seed(mnemonic=mnemonic)
return convertor.bytes_to_binary(seed)
# ----------------------------------------------------------
def Mnemonic_To_XPRV(mnemonic: str):
seedBytes = convertor.mne_to_seed(mnemonic)
return convertor.bytes_to_xprv(seedBytes)
# ----------------------------------------------------------
def Mnemonic_To_Addr(mnemonic: str, compress: bool = False) -> str:
seedBytes = convertor.mne_to_seed(mnemonic)
if compress:
return convertor.bytes_to_addr(seedBytes, True)
else:
return convertor.bytes_to_addr(seedBytes, False)
# ----------------------------------------------------------
def Mnemonic_To_XPUB(mnemonic: str):
seedBytes = convertor.mne_to_seed(mnemonic)
return convertor.bytes_to_xpub(seedBytes)
# ----------------------------------------------------------
def Mnemonic_To_Wif(mnemonic: str, compress: bool = False) -> str:
seedBytes = convertor.mne_to_seed(mnemonic)
if compress:
return convertor.bytes_to_wif(seedBytes, True)
else:
return convertor.bytes_to_wif(seedBytes, False)
# ----------------------------------------------------------
def Passphrase_To_Addr(passphrase: str, compress: bool = False) -> str:
if compress:
return convertor.pass_to_addr(passphrase, True)
else:
return convertor.pass_to_addr(passphrase, False)
# ----------------------------------------------------------
def Passphrase_To_Bytes(passphrase: str) -> bytes:
return convertor.pass_to_bytes(passphrase)
# ----------------------------------------------------------
def Passphrase_To_PrivateKey(passphrase: str) -> str:
return convertor.bytes_to_hex(convertor.pass_to_bytes(passphrase))
# ----------------------------------------------------------
def Passphrase_To_PublicKey(passphrase: str, compress: bool = False) -> str:
seed = convertor.pass_to_bytes(passphrase)
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def Passphrase_To_Wif(passphrase: str, compress: bool = False) -> str:
seed = convertor.pass_to_bytes(passphrase)
if compress:
return convertor.bytes_to_wif(seed, True)
else:
return convertor.bytes_to_wif(seed, False)
# ----------------------------------------------------------
def Passphrase_To_RootKey(passphrase: str) -> str:
seed = convertor.pass_to_bytes(passphrase)
return convertor.bytes_to_xprv(seed)
# ----------------------------------------------------------
def Passphrase_To_XPUB(passphrase: str) -> str:
seed = convertor.pass_to_bytes(passphrase)
return convertor.bytes_to_xpub(seed)
# ----------------------------------------------------------
def Passphrase_To_Decimal(passphrase: str) -> int:
seed = convertor.pass_to_bytes(passphrase)
return convertor.bytes_to_int(seed)
# ----------------------------------------------------------
def Wif_To_Bytes(wif: str) -> bytes:
return convertor.wif_to_bytes(wif)
# ----------------------------------------------------------
def Wif_To_Addr(wif: str, compress: bool = False) -> str:
return convertor.wif_to_addr(wif, compress)
# ----------------------------------------------------------
def Wif_To_PrivateKey(wif: str) -> str:
return convertor.bytes_to_hex(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Mnemonic(wif: str) -> str:
return convertor.bytes_to_mne(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Decimal(wif: str) -> int:
return convertor.bytes_to_int(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Binary(wif: str) -> str:
return convertor.bytes_to_binary(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_XPRV(wif: str) -> str:
return convertor.bytes_to_xprv(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_XPUB(wif: str) -> str: return convertor.bytes_to_xpub(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_RootKey(wif: str) -> str:
return Wif_To_XPRV(wif)
# ----------------------------------------------------------
def Wif_To_PublicKey(wif: str, compress: bool = False):
seed = convertor.wif_to_bytes(wif)
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def Decimal_To_PrivateKey(dec: int) -> str:
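    # zero-pad the integer to 64 hexadecimal characters (32 bytes), the standard private-key length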
return "%064x" % dec
# ----------------------------------------------------------
def Decimal_To_Bytes(dec: int) -> bytes:
return convertor.int_to_bytes(dec)
# ----------------------------------------------------------
def Decimal_To_PublicKey(dec: int, compress: bool = False) -> str:
seed = Decimal_To_Bytes(dec)
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def Decimal_To_Address(dec: int, compress: bool = False) -> str:
seed = Decimal_To_Bytes(dec)
if compress:
return convertor.bytes_to_addr(seed, True)
else:
return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def Decimal_To_Mnemonic(dec: int) -> str:
seed = convertor.int_to_bytes(dec)
return convertor.bytes_to_mne(seed)
# ----------------------------------------------------------
def Decimal_To_XPRV(dec: int) -> str:
seed = convertor.int_to_bytes(dec)
return convertor.bytes_to_xprv(seed)
# ----------------------------------------------------------
def Decimal_To_XPUB(dec: int) -> str:
seed = convertor.int_to_bytes(dec)
return convertor.bytes_to_xpub(seed)
# ----------------------------------------------------------
def Decimal_To_Binary(dec: int) -> str:
seed = convertor.int_to_bytes(dec)
    return convertor.bytes_to_binary(seed)
# ----------------------------------------------------------
def Decimal_To_Wif(dec: int, compress: bool = False) -> str:
seed = convertor.int_to_bytes(dec)
if compress:
return convertor.bytes_to_wif(seed, True)
else:
return convertor.bytes_to_wif(seed, False)
# ----------------------------------------------------------
def Binary_To_Bytes(binary_str: str) -> bytes:
return convertor.binary_to_bytes(binary_str)
# ----------------------------------------------------------
def Binary_To_Address(binary_str: str, compress: bool = False) -> str:
seed = convertor.binary_to_bytes(binary_str)
if compress:
return convertor.bytes_to_addr(seed, True)
else:
return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def Binary_To_PrivateKey(binary_str: str) -> str: return convertor.bytes_to_hex(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_Mnemonic(binary_str: str) -> str: return convertor.bytes_to_mne(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_XPRV(binary_str: str) -> str: return convertor.bytes_to_xprv(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_XPUB(binary_str: str) -> str: return convertor.bytes_to_xpub(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_Wif(binary_str: str, compress: bool = False) -> str: return convertor.bytes_to_wif(
convertor.binary_to_bytes(binary_str), compress)
# ----------------------------------------------------------
def Binary_To_PublicKey(binary_str: str, compress: bool = False) -> str: return convertor.bytes_to_public(
convertor.binary_to_bytes(binary_str), compress).hex()
# ----------------------------------------------------------
def Binary_To_Decimal(binary_str: str) -> int: return convertor.bytes_to_int(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def XPRV_To_Bytes(xprv: str) -> bytes: return convertor.xprv_to_bytes(xprv)
def XPRV_To_PrivateKey(xprv: str) -> str: return convertor.bytes_to_hex(convertor.xprv_to_bytes(xprv))
def XPRV_To_PublicKey(xprv: str, compress: bool = False) -> str: return convertor.bytes_to_public(
convertor.xprv_to_bytes(xprv), compress).hex()
def XPRV_To_Wif(xprv: str, compress: bool = False) -> str: return convertor.bytes_to_wif(convertor.xprv_to_bytes(xprv),
compress)
def XPRV_To_Address(xprv: str, compress: bool = False) -> str: return convertor.bytes_to_addr(
convertor.xprv_to_bytes(xprv), compress)
def XPRV_To_Mnemonic(xprv: str) -> str: return convertor.bytes_to_mne(convertor.xprv_to_bytes(xprv))
def XPRV_To_XPUB(xprv: str) -> str: return convertor.bytes_to_xpub(convertor.xprv_to_bytes(xprv))
def XPRV_To_Decimal(xprv: str) -> int: return convertor.bytes_to_int(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def PrivateKey_To_Bitcoin_Addr(privatekey: str, Type: str = 'p2pkh') -> str:
"""
Convert Private Key To Bitcoin All Type Address, Type: p2pkh, p2sh, p2wpkh, p2wsh, p2wpkh_p2sh, p2wsh_p2sh.
:param privatekey:
:type privatekey: str
:param Type:
:type Type: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Bitcoin_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> p2pkh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2pkh')
>>> p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2sh')
>>> p2wpkh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wpkh')
>>> p2wsh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh')
>>> p2wpkh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wpkh_p2sh')
>>> p2wsh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh_p2sh')
--------------------------------------------------------
"""
BTC = Bitcoin()
if Type == 'p2pkh':
return BTC.hex_addr(privatekey, 'p2pkh')
elif Type == 'p2sh':
return BTC.hex_addr(privatekey, 'p2sh')
elif Type == 'p2wpkh':
return BTC.hex_addr(privatekey, 'p2wpkh')
elif Type == 'p2wsh':
return BTC.hex_addr(privatekey, 'p2wsh')
elif Type == 'p2wpkh_p2sh':
return BTC.hex_addr(privatekey, 'p2wpkh_p2sh')
elif Type == 'p2wsh_p2sh':
return BTC.hex_addr(privatekey, 'p2wsh_p2sh')
else:
return BTC.hex_addr(privatekey, 'p2pkh')
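# Illustrative usage sketch (commented out; the key below is the docstring's placeholder,
# not a real private key):
#
#     for addr_type in ('p2pkh', 'p2sh', 'p2wpkh', 'p2wsh', 'p2wpkh_p2sh', 'p2wsh_p2sh'):
#         print(addr_type, PrivateKey_To_Bitcoin_Addr("e3bfc1c...ca52b8", addr_type))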
# ----------------------------------------------------------
def PrivateKey_To_Ethereum_Addr(privatekey: str) -> str:
"""
Convert Private Key To Ethereum Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Ethereum_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Ethereum_Addr(Privatekey)
--------------------------------------------------------
"""
ETH = Ethereum()
return ETH.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_BitcoinGold_Addr(privatekey: str) -> str:
"""
Convert Private Key To Bitcoin Gold Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_BitcoinGold_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_BitcoinGold_Addr(Privatekey)
--------------------------------------------------------
"""
BTG = BitcoinGold()
return BTG.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_Dash_Addr(privatekey: str) -> str:
"""
Convert Private Key To Dash Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Dash_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Dash_Addr(Privatekey)
--------------------------------------------------------
""" | DASH = Dash() | 5 | 2023-11-10 14:51:41+00:00 | 12k |
itzshukla/STRANGER-USERBOT2.0 | Zaid/modules/basic/animation.py | [
{
"identifier": "edit_or_reply",
"path": "Zaid/helper/basic.py",
"snippet": "async def edit_or_reply(message: Message, *args, **kwargs) -> Message:\n apa = (\n message.edit_text\n if bool(message.from_user and message.from_user.is_self or message.outgoing)\n else (message.reply_to_message or message).reply_text\n )\n return await apa(*args, **kwargs)"
},
{
"identifier": "get_text",
"path": "Zaid/helper/basic.py",
"snippet": "def get_text(message: Message) -> [None, str]:\n \"\"\"Extract Text From Commands\"\"\"\n text_to_return = message.text\n if message.text is None:\n return None\n if \" \" in text_to_return:\n try:\n return message.text.split(None, 1)[1]\n except IndexError:\n return None\n else:\n return None"
},
{
"identifier": "MEMES",
"path": "Zaid/helper/constants.py",
"snippet": "class MEMES:\n REVERSE = (\n \"⠐⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠂\\n\"\n \"⠄⠄⣰⣾⣿⣿⣿⠿⠿⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣆⠄⠄\\n\"\n \"⠄⠄⣿⣿⣿⡿⠋⠄⡀⣿⣿⣿⣿⣿⣿⣿⣿⠿⠛⠋⣉⣉⣉⡉⠙⠻⣿⣿⠄⠄\\n\"\n \"⠄⠄⣿⣿⣿⣇⠔⠈⣿⣿⣿⣿⣿⡿⠛⢉⣤⣶⣾⣿⣿⣿⣿⣿⣿⣦⡀⠹⠄⠄\\n\"\n \"⠄⠄⣿⣿⠃⠄⢠⣾⣿⣿⣿⠟⢁⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡄⠄⠄\\n\"\n \"⠄⠄⣿⣿⣿⣿⣿⣿⣿⠟⢁⣴⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⠄⠄\\n\"\n \"⠄⠄⣿⣿⣿⣿⣿⡟⠁⣴⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠄⠄\\n\"\n \"⠄⠄⣿⣿⣿⣿⠋⢠⣾⣿⣿⣿⣿⣿⣿⡿⠿⠿⠿⠿⣿⣿⣿⣿⣿⣿⣿⣿⠄⠄\\n\"\n \"⠄⠄⣿⣿⡿⠁⣰⣿⣿⣿⣿⣿⣿⣿⣿⠗⠄⠄⠄⠄⣿⣿⣿⣿⣿⣿⣿⡟⠄⠄\\n\"\n \"⠄⠄⣿⡿⠁⣼⣿⣿⣿⣿⣿⣿⡿⠋⠄⠄⠄⣠⣄⢰⣿⣿⣿⣿⣿⣿⣿⠃⠄⠄\\n\"\n \"⠄⠄⡿⠁⣼⣿⣿⣿⣿⣿⣿⣿⡇⠄⢀⡴⠚⢿⣿⣿⣿⣿⣿⣿⣿⣿⡏⢠⠄⠄\\n\"\n \"⠄⠄⠃⢰⣿⣿⣿⣿⣿⣿⡿⣿⣿⠴⠋⠄⠄⢸⣿⣿⣿⣿⣿⣿⣿⡟⢀⣾⠄⠄\\n\"\n \"⠄⠄⢀⣿⣿⣿⣿⣿⣿⣿⠃⠈⠁⠄⠄⢀⣴⣿⣿⣿⣿⣿⣿⣿⡟⢀⣾⣿⠄⠄\\n\"\n \"⠄⠄⢸⣿⣿⣿⣿⣿⣿⣿⠄⠄⠄⠄⢶⣿⣿⣿⣿⣿⣿⣿⣿⠏⢀⣾⣿⣿⠄⠄\\n\"\n \"⠄⠄⣿⣿⣿⣿⣿⣿⣿⣷⣶⣶⣶⣶⣶⣿⣿⣿⣿⣿⣿⣿⠋⣠⣿⣿⣿⣿⠄⠄\\n\"\n \"⠄⠄⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⢁⣼⣿⣿⣿⣿⣿⠄⠄\\n\"\n \"⠄⠄⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⢁⣴⣿⣿⣿⣿⣿⣿⣿⠄⠄\\n\"\n \"⠄⠄⠈⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠟⢁⣴⣿⣿⣿⣿⠗⠄⠄⣿⣿⠄⠄\\n\"\n \"⠄⠄⣆⠈⠻⢿⣿⣿⣿⣿⣿⣿⠿⠛⣉⣤⣾⣿⣿⣿⣿⣿⣇⠠⠺⣷⣿⣿⠄⠄\\n\"\n \"⠄⠄⣿⣿⣦⣄⣈⣉⣉⣉⣡⣤⣶⣿⣿⣿⣿⣿⣿⣿⣿⠉⠁⣀⣼⣿⣿⣿⠄⠄\\n\"\n \"⠄⠄⠻⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣶⣾⣿⣿⡿⠟⠄⠄\\n\"\n \"⠠⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄\\n\"\n )\n\n SLAP_TEMPLATES = [\n \"{hits} {victim} with a {item}.\",\n \"{hits} {victim} in the face with a {item}.\",\n \"{hits} {victim} around a bit with a {item}.\",\n \"{throws} a {item} at {victim}.\",\n \"grabs a {item} and {throws} it at {victim}'s face.\",\n \"{hits} a {item} at {victim}.\",\n \"{throws} a few {item} at {victim}.\",\n \"grabs a {item} and {throws} it in {victim}'s face.\",\n \"launches a {item} in {victim}'s general direction.\",\n \"sits on {victim}'s face while slamming a {item} {where}.\",\n \"starts slapping {victim} silly with a {item}.\",\n \"pins {victim} down and repeatedly {hits} them with a {item}.\",\n \"grabs up a {item} and {hits} {victim} with it.\",\n \"starts slapping {victim} silly with a {item}.\",\n \"holds {victim} down and repeatedly {hits} them with a {item}.\",\n \"prods {victim} with a {item}.\",\n \"picks up a {item} and {hits} {victim} with it.\",\n \"ties {victim} to a chair and {throws} a {item} at them.\",\n \"{hits} {victim} {where} with a {item}.\",\n \"ties {victim} to a pole and whips them {where} with a {item}.\"\n \"gave a friendly push to help {victim} learn to swim in lava.\",\n \"sent {victim} to /dev/null.\",\n \"sent {victim} down the memory hole.\",\n \"beheaded {victim}.\",\n \"threw {victim} off a building.\",\n \"replaced all of {victim}'s music with Nickelback.\",\n \"spammed {victim}'s email.\",\n \"made {victim} a knuckle sandwich.\",\n \"slapped {victim} with pure nothing.\",\n \"hit {victim} with a small, interstellar spaceship.\",\n \"quickscoped {victim}.\",\n \"put {victim} in check-mate.\",\n \"RSA-encrypted {victim} and deleted the private key.\",\n \"put {victim} in the friendzone.\",\n \"slaps {victim} with a DMCA takedown request!\",\n ]\n\n ITEMS = [\n \"cast iron skillet\",\n \"large trout\",\n \"baseball bat\",\n \"cricket bat\",\n \"wooden cane\",\n \"nail\",\n \"printer\",\n \"shovel\",\n \"pair of trousers\",\n \"CRT monitor\",\n \"diamond sword\",\n \"baguette\",\n \"physics textbook\",\n \"toaster\",\n \"portrait of Richard Stallman\",\n \"television\",\n \"mau5head\",\n \"five ton truck\",\n \"roll of duct tape\",\n \"book\",\n \"laptop\",\n \"old television\",\n \"sack of rocks\",\n \"rainbow trout\",\n \"cobblestone block\",\n \"lava bucket\",\n \"rubber chicken\",\n \"spiked bat\",\n \"gold block\",\n \"fire extinguisher\",\n \"heavy rock\",\n \"chunk of dirt\",\n \"beehive\",\n \"piece of rotten meat\",\n \"bear\",\n \"ton of bricks\",\n ]\n\n THROW = [\n \"throws\",\n \"flings\",\n \"chucks\",\n \"hurls\",\n ]\n\n HIT = [\n \"hits\",\n \"whacks\",\n \"slaps\",\n \"smacks\",\n \"bashes\",\n ]\n\n 
WHERE = [\"in the chest\", \"on the head\", \"on the butt\", \"on the crotch\"]\n\n REPLACEMENT_MAP = {\n \"a\": \"ɐ\",\n \"b\": \"q\",\n \"c\": \"ɔ\",\n \"d\": \"p\",\n \"e\": \"ǝ\",\n \"f\": \"ɟ\",\n \"g\": \"ƃ\",\n \"h\": \"ɥ\",\n \"i\": \"ᴉ\",\n \"j\": \"ɾ\",\n \"k\": \"ʞ\",\n \"l\": \"l\",\n \"m\": \"ɯ\",\n \"n\": \"u\",\n \"o\": \"o\",\n \"p\": \"d\",\n \"q\": \"b\",\n \"r\": \"ɹ\",\n \"s\": \"s\",\n \"t\": \"ʇ\",\n \"u\": \"n\",\n \"v\": \"ʌ\",\n \"w\": \"ʍ\",\n \"x\": \"x\",\n \"y\": \"ʎ\",\n \"z\": \"z\",\n \"A\": \"∀\",\n \"B\": \"B\",\n \"C\": \"Ɔ\",\n \"D\": \"D\",\n \"E\": \"Ǝ\",\n \"F\": \"Ⅎ\",\n \"G\": \"פ\",\n \"H\": \"H\",\n \"I\": \"I\",\n \"J\": \"ſ\",\n \"K\": \"K\",\n \"L\": \"˥\",\n \"M\": \"W\",\n \"N\": \"N\",\n \"O\": \"O\",\n \"P\": \"Ԁ\",\n \"Q\": \"Q\",\n \"R\": \"R\",\n \"S\": \"S\",\n \"T\": \"┴\",\n \"U\": \"∩\",\n \"V\": \"Λ\",\n \"W\": \"M\",\n \"X\": \"X\",\n \"Y\": \"⅄\",\n \"Z\": \"Z\",\n \"0\": \"0\",\n \"1\": \"Ɩ\",\n \"2\": \"ᄅ\",\n \"3\": \"Ɛ\",\n \"4\": \"ㄣ\",\n \"5\": \"ϛ\",\n \"6\": \"9\",\n \"7\": \"ㄥ\",\n \"8\": \"8\",\n \"9\": \"6\",\n \",\": \"'\",\n \".\": \"˙\",\n \"?\": \"¿\",\n \"!\": \"¡\",\n '\"': \",,\",\n \"'\": \",\",\n \"(\": \")\",\n \")\": \"(\",\n \"[\": \"]\",\n \"]\": \"[\",\n \"{\": \"}\",\n \"}\": \"{\",\n \"<\": \">\",\n \">\": \"<\",\n \"&\": \"⅋\",\n \"_\": \"‾\",\n }\n\n SHRUGS = [\n \"┐(´д`)┌\",\n \"┐(´~`)┌\",\n \"┐(´ー`)┌\",\n \"┐( ̄ヘ ̄)┌\",\n \"╮(╯∀╰)╭\",\n \"╮(╯_╰)╭\",\n \"┐(´д`)┌\",\n \"┐(´∀`)┌\",\n \"ʅ(́◡◝)ʃ\",\n \"┐(゚~゚)┌\",\n \"┐('д')┌\",\n \"┐(‘~`;)┌\",\n \"ヘ(´-`;)ヘ\",\n \"┐( -“-)┌\",\n \"ʅ(´◔౪◔)ʃ\",\n \"ヽ(゜~゜o)ノ\",\n \"ヽ(~~~ )ノ\",\n \"┐(~ー~;)┌\",\n \"┐(-。ー;)┌\",\n r\"¯\\_(ツ)_/¯\",\n r\"¯\\_(⊙_ʖ⊙)_/¯\",\n r\"¯\\_༼ ಥ ‿ ಥ ༽_/¯\",\n \"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ\",\n r\"¯\\_༼ •́ ͜ʖ •̀ ༽_/¯\",\n r\"¯\\_( ͡° ͜ʖ ͡°)_/¯\",\n r\"¯\\(°_o)/¯\",\n \"┐( ∵ )┌\",\n r\"¯\\_༼ᴼل͜ᴼ༽_/¯\",\n \"╮(. ❛ ᴗ ❛.)╭\",\n \"乁༼◉‿◉✿༽ㄏ\",\n r\"¯\\(◉‿◉)/¯\",\n r\"¯\\_ʘ‿ʘ_/¯\",\n r\"¯\\_༼ ಥ ‿ ಥ ༽_/¯\",\n \"╮(^▽^)╭\",\n \"乁[ ◕ ᴥ ◕ ]ㄏ\",\n \"乁[ᓀ˵▾˵ᓂ]ㄏ\",\n \"┐(´(エ)`)┌\",\n \"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ\",\n r\"¯\\_( ͠° ͟ʖ °͠ )_/¯\",\n \"乁( •_• )ㄏ\",\n \"乁| ・ 〰 ・ |ㄏ\",\n \"┐(‘~;)┌\",\n \"┐( ̄ヘ ̄)┌\",\n \"┐(´д)┌\",\n \"乁( . ര ʖ̯ ര . )ㄏ\",\n \"乁 ˘ o ˘ ㄏ\",\n \"乁ʕ •̀ •́ ʔㄏ\",\n r\"¯\\_(◕෴◕)_/¯\",\n r\"¯\\_〳 •̀ o •́ 〵_/¯\",\n \"乁║ ˙ 益 ˙ ║ㄏ\",\n ]\n\n BRAIN = [\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n🧠 <(^_^ <)🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n🧠 <(^_^ <) 🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n🧠 <(^_^ <) 🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n🧠 <(^_^ <) 🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n🧠 <(^_^ <) 🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n🧠<(^_^ <) 🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n(> ^_^)>🧠 🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n (> ^_^)>🧠 🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n (> ^_^)>🧠 🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n (> ^_^)>🧠 🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n (> ^_^)>🧠 🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n (> ^_^)>🧠🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n (> ^_^)>🗑\",\n \"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\\n\\n <(^_^ <)🗑\",\n ]\n\n COCK = (\n \"⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠿⠿⢿⣿⣿⠿⠛⠿⣿⣿⣿⣿⣿\\n\"\n \"⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠿⠟⠉⠄⣀⡤⢤⣤⣈⠁⣠⡔⠶⣾⣿⣿⣿\\n\"\n \"⣿⣿⣿⣿⣿⣿⣿⡿⠛⠋⠁⠄⠄⠄⣼⣿⠁⡀⢹⣿⣷⢹⡇⠄⠎⣿⣿⣿\\n\"\n \"⣿⣿⣿⠿⠛⠉⠁⠄⠄⠄⠄⠄⠄⠄⠹⣇⣀⣡⣾⣿⡿⠉⠛⠒⠒⠋⠉⢸\\n\"\n \"⡿⠋⠁⠄⠄⢀⣤⣤⡀⠄⠄⠄⠄⠄⠄⠈⠙⠛⠛⠉⠄⠄⠄⠄⠄⠄⠄⠈\\n\"\n \"⠄⠄⠄⠄⠄⢹⣧⡈⠿⣷⣄⣀⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢀⣠⢄⣾\\n\"\n \"⠄⠄⠄⠄⠄⠈⠻⢿⣶⣌⣙⡛⠛⠿⠶⠶⠶⠶⠶⠖⣒⣒⣚⣋⡩⢱⣾⣿\\n\"\n \"⠄⠄⠄⠄⠄⠄⠄⠄⠈⠉⠛⠛⠛⠻⠿⠿⠟⠛⠛⠛⠉⢉⣥⣶⣾⣿⣿⣿\\n\"\n \"⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠒⠶⣿⣿⣿⣿⣿⣿⣿⣿\\n\"\n \"⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠈⠻⣿⣿⣿⣿⣿⣿\\n\"\n \"⣿⡿⠛⠛⠛⢻⣿⠿⠛⠛⠛⢿⣿⣿⡿⠛⠛⠛⢻⡟⠛⣿⡿⠛⣻⣿⣿⣿\\n\"\n \"⡟⠄⣼⣿⣿⣿⡇⠄⣾⣿⣧⠄⢻⡏⠄⣼⣿⣿⣿⡇⠄⡟⢀⣴⣿⣿⣿⣿\\n\"\n \"⡇⠄⣿⣿⣿⣿⡄⠄⣿⣿⣿⠄⢸⡇⠄⣿⣿⣿⣿⡇⠄⣀⠈⢻⣿⣿⣿⣿\\n\"\n \"⣿⣄⠈⠙⠛⢻⣧⡄⠙⠛⠉⣠⣿⣷⣄⠈⠙⠛⢹⡇⠄⣿⣧⠄⠻⣿⣿⣿\\n\"\n )\n\n TABLE_FLIPS = [\n '(╯°Д°)╯︵/(.□ . 
\\)',\n '(˚Õ˚)ر ~~~~╚╩╩╝',\n '(ノಠ益ಠ)ノ彡┻━┻',\n '(╯°□°)╯︵ ┻━┻',\n '(┛◉Д◉)┛彡┻━┻',\n '┻━┻︵ \\(°□°)/ ︵ ┻━┻',\n '(┛ಠ_ಠ)┛彡┻━┻',\n '(╯°□°)╯︵ ʞooqǝɔɐℲ'\n ]"
}
] | import asyncio
import random
import requests
from pyrogram import *
from pyrogram import Client, filters
from pyrogram.errors.exceptions.flood_420 import FloodWait
from pyrogram.types import *
from pyrogram.types import Message
from Zaid.helper.basic import edit_or_reply, get_text
from Zaid.helper.constants import MEMES
from Zaid.modules.help import * | 9,519 |
DEFAULTUSER = "Man"
NOBLE = [
"╲╲╲┏━━┓╭━━━╮╱╱╱\n╲╲╲┗┓┏┛┃╭━╮┃╱╱╱\n╲╲╲╲┃┃┏┫┃╭┻┻┓╱╱\n╱╱╱┏╯╰╯┃╰┫┏━╯╱╱\n╱╱┏┻━┳┳┻━┫┗┓╱╱╱\n╱╱╰━┓┃┃╲┏┫┏┛╲╲╲\n╱╱╱╱┃╰╯╲┃┃┗━╮╲╲\n╱╱╱╱╰━━━╯╰━━┛╲╲",
"┏━╮\n┃▔┃▂▂┏━━┓┏━┳━━━┓\n┃▂┣━━┻━╮┃┃▂┃▂┏━╯\n┃▔┃▔╭╮▔┃┃┃▔┃▔┗━┓\n┃▂┃▂╰╯▂┃┗╯▂┃▂▂▂┃\n┃▔┗━━━╮┃▔▔▔┃▔┏━╯\n┃▂▂▂▂▂┣╯▂▂▂┃▂┗━╮\n┗━━━━━┻━━━━┻━━━┛",
"┏┓┏━┳━┳━┳━┓\n┃┗┫╋┣┓┃┏┫┻┫\n┗━┻━┛┗━┛┗━┛\n────YOU────",
"╦──╔╗─╗╔─╔ ─\n║──║║─║║─╠ ─\n╚═─╚╝─╚╝─╚ ─\n╦─╦─╔╗─╦╦ \n╚╦╝─║║─║║ \n─╩──╚╝─╚╝",
"╔══╗....<3 \n╚╗╔╝..('\../') \n╔╝╚╗..( •.• ) \n╚══╝..(,,)(,,) \n╔╗╔═╦╦╦═╗ ╔╗╔╗ \n║╚╣║║║║╩╣ ║╚╝║ \n╚═╩═╩═╩═╝ ╚══╝",
"░I░L░O░V░E░Y░O░U░",
"┈┈╭━╱▔▔▔▔╲━╮┈┈┈\n┈┈╰╱╭▅╮╭▅╮╲╯┈┈┈\n╳┈┈▏╰┈▅▅┈╯▕┈┈┈┈\n┈┈┈╲┈╰━━╯┈╱┈┈╳┈\n┈┈┈╱╱▔╲╱▔╲╲┈┈┈┈\n┈╭━╮▔▏┊┊▕▔╭━╮┈╳\n┈┃┊┣▔╲┊┊╱▔┫┊┃┈┈\n┈╰━━━━╲╱━━━━╯┈╳",
"╔ღ═╗╔╗\n╚╗╔╝║║ღ═╦╦╦═ღ\n╔╝╚╗ღ╚╣║║║║╠╣\n╚═ღ╝╚═╩═╩ღ╩═╝",
"╔══╗ \n╚╗╔╝ \n╔╝(¯'v'¯) \n╚══'.¸./\n╔╗╔═╦╦╦═╗ ╔╗╔╗ \n║╚╣║║║║╩╣ ║╚╝║ \n╚═╩═╩═╩═╝ ╚══╝",
"╔╗ \n║║╔═╦═╦═╦═╗ ╔╦╗ \n║╚╣╬╠╗║╔╣╩╣ ║║║ \n╚═╩═╝╚═╝╚═╝ ╚═╝ \n╔═╗ \n║═╬═╦╦╦═╦═╦═╦═╦═╗ \n║╔╣╬║╔╣╩╬╗║╔╣╩╣╔╝ \n╚╝╚═╩╝╚═╝╚═╝╚═╩╝",
"╔══╗ \n╚╗╔╝ \n╔╝╚╗ \n╚══╝ \n╔╗ \n║║╔═╦╦╦═╗ \n║╚╣║║║║╚╣ \n╚═╩═╩═╩═╝ \n╔╗╔╗ ♥️ \n║╚╝╠═╦╦╗ \n╚╗╔╣║║║║ \n═╚╝╚═╩═╝",
"╔══╗╔╗ ♡ \n╚╗╔╝║║╔═╦╦╦╔╗ \n╔╝╚╗║╚╣║║║║╔╣ \n╚══╝╚═╩═╩═╩═╝\n─────YOU─────",
"╭╮╭╮╮╭╮╮╭╮╮╭╮╮ \n┃┃╰╮╯╰╮╯╰╮╯╰╮╯ \n┃┃╭┳━━┳━╮╭━┳━━╮ \n┃┃┃┃╭╮┣╮┃┃╭┫╭╮┃ \n┃╰╯┃╰╯┃┃╰╯┃┃╰┻┻╮ \n╰━━┻━━╯╰━━╯╰━━━╯",
"┊┊╭━╮┊┊┊┊┊┊┊┊┊┊┊ \n━━╋━╯┊┊┊┊┊┊┊┊┊┊┊ \n┊┊┃┊╭━┳╮╭┓┊╭╮╭━╮ \n╭━╋━╋━╯┣╯┃┊┃╰╋━╯ \n╰━╯┊╰━━╯┊╰━┛┊╰━━",
]
R = "❤️"
W = "🤍"
heart_list = [
W * 9,
W * 2 + R * 2 + W + R * 2 + W * 2,
W + R * 7 + W,
W + R * 7 + W,
W + R * 7 + W,
W * 2 + R * 5 + W * 2,
W * 3 + R * 3 + W * 3,
W * 4 + R + W * 4,
W * 9,
]
joined_heart = "\n".join(heart_list)
heartlet_len = joined_heart.count(R)
SLEEP = 0.1
async def _wrap_edit(message, text: str):
"""Floodwait-safe utility wrapper for edit"""
try:
await message.edit(text)
    except FloodWait as fl:
        # wait out the imposed flood limit, then retry the edit once
        await asyncio.sleep(fl.x)
        await message.edit(text)
async def phase1(message):
"""Big scroll"""
BIG_SCROLL = "🧡💛💚💙💜🖤🤎"
await _wrap_edit(message, joined_heart)
for heart in BIG_SCROLL:
await _wrap_edit(message, joined_heart.replace(R, heart))
await asyncio.sleep(SLEEP)
async def phase2(message):
"""Per-heart randomiser"""
ALL = ["❤️"] + list("🧡💛💚💙💜🤎🖤") # don't include white heart
format_heart = joined_heart.replace(R, "{}")
for _ in range(5):
heart = format_heart.format(*random.choices(ALL, k=heartlet_len))
await _wrap_edit(message, heart)
await asyncio.sleep(SLEEP)
async def phase3(message):
"""Fill up heartlet matrix"""
await _wrap_edit(message, joined_heart)
await asyncio.sleep(SLEEP * 2)
repl = joined_heart
for _ in range(joined_heart.count(W)):
repl = repl.replace(W, R, 1)
await _wrap_edit(message, repl)
await asyncio.sleep(SLEEP)
async def phase4(message):
"""Matrix shrinking"""
for i in range(7, 0, -1):
heart_matrix = "\n".join([R * i] * i)
await _wrap_edit(message, heart_matrix)
await asyncio.sleep(SLEEP)
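# Illustrative sketch (commented out): phase2-phase4 are not used by the .heart handler
# below, which only runs phase1; chaining all four phases would look like this:
#
#     async def full_heart_animation(message):
#         for phase in (phase1, phase2, phase3, phase4):
#             await phase(message)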
@Client.on_message(filters.command(["heart", "love"], ".") & filters.me)
async def hearts(client: Client, message: Message):
await phase1(message)
await asyncio.sleep(SLEEP * 3)
await message.edit("❤️ I")
await asyncio.sleep(0.5)
await message.edit("❤️ I Love")
await asyncio.sleep(0.5)
await message.edit("❤️ I Love You")
await asyncio.sleep(3)
await message.edit("❤️ 𝐈 𝐋𝐎𝐕𝐄 𝐘𝐎𝐔 𝐉𝐀𝐀𝐍 <3")
@Client.on_message(
filters.me & (filters.command(["loveyou"], ".") | filters.regex("^loveyou "))
)
async def _(client: Client, message: Message):
noble = random.randint(1, len(NOBLE) - 2)
reply_text = NOBLE[noble]
|
DEFAULTUSER = "Man"
NOBLE = [
"╲╲╲┏━━┓╭━━━╮╱╱╱\n╲╲╲┗┓┏┛┃╭━╮┃╱╱╱\n╲╲╲╲┃┃┏┫┃╭┻┻┓╱╱\n╱╱╱┏╯╰╯┃╰┫┏━╯╱╱\n╱╱┏┻━┳┳┻━┫┗┓╱╱╱\n╱╱╰━┓┃┃╲┏┫┏┛╲╲╲\n╱╱╱╱┃╰╯╲┃┃┗━╮╲╲\n╱╱╱╱╰━━━╯╰━━┛╲╲",
"┏━╮\n┃▔┃▂▂┏━━┓┏━┳━━━┓\n┃▂┣━━┻━╮┃┃▂┃▂┏━╯\n┃▔┃▔╭╮▔┃┃┃▔┃▔┗━┓\n┃▂┃▂╰╯▂┃┗╯▂┃▂▂▂┃\n┃▔┗━━━╮┃▔▔▔┃▔┏━╯\n┃▂▂▂▂▂┣╯▂▂▂┃▂┗━╮\n┗━━━━━┻━━━━┻━━━┛",
"┏┓┏━┳━┳━┳━┓\n┃┗┫╋┣┓┃┏┫┻┫\n┗━┻━┛┗━┛┗━┛\n────YOU────",
"╦──╔╗─╗╔─╔ ─\n║──║║─║║─╠ ─\n╚═─╚╝─╚╝─╚ ─\n╦─╦─╔╗─╦╦ \n╚╦╝─║║─║║ \n─╩──╚╝─╚╝",
"╔══╗....<3 \n╚╗╔╝..('\../') \n╔╝╚╗..( •.• ) \n╚══╝..(,,)(,,) \n╔╗╔═╦╦╦═╗ ╔╗╔╗ \n║╚╣║║║║╩╣ ║╚╝║ \n╚═╩═╩═╩═╝ ╚══╝",
"░I░L░O░V░E░Y░O░U░",
"┈┈╭━╱▔▔▔▔╲━╮┈┈┈\n┈┈╰╱╭▅╮╭▅╮╲╯┈┈┈\n╳┈┈▏╰┈▅▅┈╯▕┈┈┈┈\n┈┈┈╲┈╰━━╯┈╱┈┈╳┈\n┈┈┈╱╱▔╲╱▔╲╲┈┈┈┈\n┈╭━╮▔▏┊┊▕▔╭━╮┈╳\n┈┃┊┣▔╲┊┊╱▔┫┊┃┈┈\n┈╰━━━━╲╱━━━━╯┈╳",
"╔ღ═╗╔╗\n╚╗╔╝║║ღ═╦╦╦═ღ\n╔╝╚╗ღ╚╣║║║║╠╣\n╚═ღ╝╚═╩═╩ღ╩═╝",
"╔══╗ \n╚╗╔╝ \n╔╝(¯'v'¯) \n╚══'.¸./\n╔╗╔═╦╦╦═╗ ╔╗╔╗ \n║╚╣║║║║╩╣ ║╚╝║ \n╚═╩═╩═╩═╝ ╚══╝",
"╔╗ \n║║╔═╦═╦═╦═╗ ╔╦╗ \n║╚╣╬╠╗║╔╣╩╣ ║║║ \n╚═╩═╝╚═╝╚═╝ ╚═╝ \n╔═╗ \n║═╬═╦╦╦═╦═╦═╦═╦═╗ \n║╔╣╬║╔╣╩╬╗║╔╣╩╣╔╝ \n╚╝╚═╩╝╚═╝╚═╝╚═╩╝",
"╔══╗ \n╚╗╔╝ \n╔╝╚╗ \n╚══╝ \n╔╗ \n║║╔═╦╦╦═╗ \n║╚╣║║║║╚╣ \n╚═╩═╩═╩═╝ \n╔╗╔╗ ♥️ \n║╚╝╠═╦╦╗ \n╚╗╔╣║║║║ \n═╚╝╚═╩═╝",
"╔══╗╔╗ ♡ \n╚╗╔╝║║╔═╦╦╦╔╗ \n╔╝╚╗║╚╣║║║║╔╣ \n╚══╝╚═╩═╩═╩═╝\n─────YOU─────",
"╭╮╭╮╮╭╮╮╭╮╮╭╮╮ \n┃┃╰╮╯╰╮╯╰╮╯╰╮╯ \n┃┃╭┳━━┳━╮╭━┳━━╮ \n┃┃┃┃╭╮┣╮┃┃╭┫╭╮┃ \n┃╰╯┃╰╯┃┃╰╯┃┃╰┻┻╮ \n╰━━┻━━╯╰━━╯╰━━━╯",
"┊┊╭━╮┊┊┊┊┊┊┊┊┊┊┊ \n━━╋━╯┊┊┊┊┊┊┊┊┊┊┊ \n┊┊┃┊╭━┳╮╭┓┊╭╮╭━╮ \n╭━╋━╋━╯┣╯┃┊┃╰╋━╯ \n╰━╯┊╰━━╯┊╰━┛┊╰━━",
]
R = "❤️"
W = "🤍"
heart_list = [
W * 9,
W * 2 + R * 2 + W + R * 2 + W * 2,
W + R * 7 + W,
W + R * 7 + W,
W + R * 7 + W,
W * 2 + R * 5 + W * 2,
W * 3 + R * 3 + W * 3,
W * 4 + R + W * 4,
W * 9,
]
joined_heart = "\n".join(heart_list)
heartlet_len = joined_heart.count(R)
SLEEP = 0.1
async def _wrap_edit(message, text: str):
"""Floodwait-safe utility wrapper for edit"""
try:
await message.edit(text)
    except FloodWait as fl:
        # wait out the imposed flood limit, then retry the edit once
        await asyncio.sleep(fl.x)
        await message.edit(text)
async def phase1(message):
"""Big scroll"""
BIG_SCROLL = "🧡💛💚💙💜🖤🤎"
await _wrap_edit(message, joined_heart)
for heart in BIG_SCROLL:
await _wrap_edit(message, joined_heart.replace(R, heart))
await asyncio.sleep(SLEEP)
async def phase2(message):
"""Per-heart randomiser"""
ALL = ["❤️"] + list("🧡💛💚💙💜🤎🖤") # don't include white heart
format_heart = joined_heart.replace(R, "{}")
for _ in range(5):
heart = format_heart.format(*random.choices(ALL, k=heartlet_len))
await _wrap_edit(message, heart)
await asyncio.sleep(SLEEP)
async def phase3(message):
"""Fill up heartlet matrix"""
await _wrap_edit(message, joined_heart)
await asyncio.sleep(SLEEP * 2)
repl = joined_heart
for _ in range(joined_heart.count(W)):
repl = repl.replace(W, R, 1)
await _wrap_edit(message, repl)
await asyncio.sleep(SLEEP)
async def phase4(message):
"""Matrix shrinking"""
for i in range(7, 0, -1):
heart_matrix = "\n".join([R * i] * i)
await _wrap_edit(message, heart_matrix)
await asyncio.sleep(SLEEP)
@Client.on_message(filters.command(["heart", "love"], ".") & filters.me)
async def hearts(client: Client, message: Message):
await phase1(message)
await asyncio.sleep(SLEEP * 3)
await message.edit("❤️ I")
await asyncio.sleep(0.5)
await message.edit("❤️ I Love")
await asyncio.sleep(0.5)
await message.edit("❤️ I Love You")
await asyncio.sleep(3)
await message.edit("❤️ 𝐈 𝐋𝐎𝐕𝐄 𝐘𝐎𝐔 𝐉𝐀𝐀𝐍 <3")
@Client.on_message(
filters.me & (filters.command(["loveyou"], ".") | filters.regex("^loveyou "))
)
async def _(client: Client, message: Message):
noble = random.randint(1, len(NOBLE) - 2)
reply_text = NOBLE[noble] | await edit_or_reply(message, reply_text) | 0 | 2023-11-13 18:19:50+00:00 | 12k |
atlantic-quantum/Shipyard | shipyard/passes/insert_ct_waveforms.py | [
{
"identifier": "LOGGER",
"path": "shipyard/logger.py",
"snippet": "LOGGER = logging.getLogger(\"Compiler\")"
},
{
"identifier": "LazyRepr",
"path": "shipyard/utilities.py",
"snippet": "class LazyRepr:\n \"\"\"\n wrap representation for lazy evaluation in logging.\n based of https://stackoverflow.com/a/60072502\n \"\"\"\n\n def __init__(self, callback: callable, args: list):\n self.callback = callback\n self.args = args\n\n def __repr__(self):\n return repr(self.callback(*self.args))"
},
{
"identifier": "GenericTransformer",
"path": "shipyard/visitors/generic_transformer.py",
"snippet": "class GenericTransformer(QASMTransformer):\n def _visit_list(\n self, nodes: list[ast.QASMNode], visit_function: callable, context=None\n ) -> list[ast.QASMNode]:\n new_nodes = []\n for node in nodes:\n new_node = visit_function(node)\n if new_node:\n new_nodes.append(new_node)\n return new_nodes\n\n def _visit_list_flatten(\n self, nodes: list[ast.QASMNode], visit_function: callable, context=None\n ) -> list[ast.QASMNode]:\n flat_nodes = []\n for node in nodes:\n new_node = visit_function(node)\n if new_node:\n flat_nodes.extend(\n new_node if isinstance(new_node, list) else [new_node]\n )\n return flat_nodes\n\n # return [node for node in flat_nodes if node]\n\n def visit_Program(self, node: ast.Program, context=None) -> ast.Program:\n \"\"\"\n An entire OpenQASM 3 program represented by a list of top level statements\n \"\"\"\n node.statements = self._visit_list(node.statements, self.visit)\n return node\n\n def visit_Annotation(self, node: ast.Annotation, context=None) -> ast.Annotation:\n \"\"\"An annotation applied to a statment.\"\"\"\n return node\n\n def visit_Statement(self, node: ast.Statement, context=None) -> ast.Statement:\n \"\"\"A statement: anything that can appear on its own line\"\"\"\n node.annotations = self._visit_list(node.annotations, self.visit)\n return node\n\n def visit_Include(\n self, node: ast.Include, context=None\n ) -> ast.Include | list[ast.Statement]:\n \"\"\"\n An include statement\n \"\"\"\n node = self.visit_Statement(node)\n return node\n\n def visit_ExpressionStatement(\n self, node: ast.ExpressionStatement, context=None\n ) -> ast.ExpressionStatement:\n \"\"\"A statement that contains a single expression\"\"\"\n node = self.visit_Statement(node)\n node.expression = self.visit(node.expression)\n return node\n\n # Note that QubitDeclaration is not a valid QuantumStatement, because qubits\n # can only be declared in global scopes, not in gates.\n def visit_QubitDeclaration(\n self, node: ast.QubitDeclaration, context=None\n ) -> ast.QubitDeclaration:\n \"\"\"\n Global qubit declaration\n\n Example::\n\n qubit q;\n qubit[4] q;\n\n q // <- qubit\n 4 // <- size\n\n \"\"\"\n node = self.visit_Statement(node)\n node.qubit = self.visit_Identifier(node.qubit)\n node.size = self.visit(node.size) if node.size else None\n return node\n\n def visit_QuantumGateDefinition(\n self, node: ast.QuantumGateDefinition, context=None\n ) -> ast.QuantumGateDefinition:\n \"\"\"\n Define a new quantum gate\n\n Example::\n\n gate cx c, t {\n ctrl @ unitary(pi, 0, pi) c, t;\n }\n\n \"\"\"\n node = self.visit_Statement(node)\n node.name = self.visit_Identifier(node.name)\n node.arguments = self._visit_list(node.arguments, self.visit_Identifier)\n node.qubits = self._visit_list(node.qubits, self.visit_Identifier)\n node.body = self._visit_list(node.body, self.visit)\n return node\n\n def visit_QuantumStatement(\n self, node: ast.QuantumStatement, context=None\n ) -> ast.QuantumStatement:\n \"\"\"Statements that may appear inside a gate declaration\"\"\"\n node = self.visit_Statement(node)\n return node\n\n def visit_ExternDeclaration(\n self, node: ast.ExternDeclaration, context=None\n ) -> ast.ExternDeclaration:\n \"\"\"\n A extern declaration\n\n Example::\n\n extern get_pauli(int[prec], context=None) -> bit[2 * n];\n\n get_pauli // <- name\n int[prec] // <- classical type\n bit[2 * n] // <- return type\n\n \"\"\"\n node = self.visit_Statement(node)\n node.name = self.visit_Identifier(node.name)\n node.arguments = self._visit_list(node.arguments, self.visit)\n if 
node.return_type:\n node.return_type = self.visit(node.return_type)\n return node\n\n def visit_Expression(self, node: ast.Expression, context=None) -> ast.Expression:\n \"\"\"An expression: anything that returns a value\"\"\"\n return node\n\n def visit_Identifier(self, node: ast.Identifier, context=None) -> ast.Identifier:\n \"\"\"\n An identifier\n\n Example::\n\n q1\n\n \"\"\"\n node = self.visit_Expression(node)\n return node\n\n def visit_UnaryExpression(\n self, node: ast.UnaryExpression, context=None\n ) -> ast.UnaryExpression:\n \"\"\"\n A unary expression\n\n Example::\n\n ~b\n !bool\n -i\n\n \"\"\"\n node = self.visit_Expression(node)\n return node\n\n def visit_BinaryExpression(\n self, node: ast.BinaryExpression, context=None\n ) -> ast.BinaryExpression:\n \"\"\"\n A binary expression\n\n Example::\n\n q1 || q2\n\n \"\"\"\n node = self.visit_Expression(node)\n node.lhs = self.visit(node.lhs)\n node.rhs = self.visit(node.rhs)\n return node\n\n def visit_IntegerLiteral(\n self, node: ast.IntegerLiteral, context=None\n ) -> ast.IntegerLiteral:\n \"\"\"\n An integer literal\n\n Example::\n\n 1\n\n \"\"\"\n node = self.visit_Expression(node)\n return node\n\n def visit_FloatLiteral(\n self, node: ast.FloatLiteral, context=None\n ) -> ast.FloatLiteral:\n \"\"\"\n An real number literal\n\n Example::\n\n 1.1\n\n \"\"\"\n node = self.visit_Expression(node)\n return node\n\n def visit_ImaginaryLiteral(\n self, node: ast.ImaginaryLiteral, context=None\n ) -> ast.ImaginaryLiteral:\n \"\"\"\n An real number literal\n\n Example::\n\n 1.1im\n\n \"\"\"\n node = self.visit_Expression(node)\n return node\n\n def visit_BooleanLiteral(\n self, node: ast.BooleanLiteral, context=None\n ) -> ast.BooleanLiteral:\n \"\"\"\n A boolean expression\n\n Example::\n\n true\n false\n\n \"\"\"\n node = self.visit_Expression(node)\n return node\n\n def visit_BitstringLiteral(\n self, node: ast.BitstringLiteral, context=None\n ) -> ast.BitstringLiteral:\n \"\"\"A literal bitstring value. 
The ``value`` is the numerical value of the\n bitstring, and the ``width`` is the number of digits given.\"\"\"\n node = self.visit_Expression(node)\n return node\n\n def visit_DurationLiteral(\n self, node: ast.DurationLiteral, context=None\n ) -> ast.DurationLiteral:\n \"\"\"\n A duration literal\n\n Example::\n\n 1.0ns\n\n \"\"\"\n node = self.visit_Expression(node)\n return node\n\n def visit_ArrayLiteral(\n self, node: ast.ArrayLiteral, context=None\n ) -> ast.ArrayLiteral:\n \"\"\"Array literal, used to initialise declared arrays.\n\n For example::\n\n array[uint[8], 2] row = {1, 2};\n array[uint[8], 2, 2] my_array = {{1, 2}, {3, 4}};\n array[uint[8], 2, 2] my_array = {row, row};\n \"\"\"\n node = self.visit_Expression(node)\n node.values = self._visit_list(node.values, self.visit)\n return node\n\n def visit_FunctionCall(\n self, node: ast.FunctionCall, context=None\n ) -> ast.FunctionCall:\n \"\"\"\n A function call expression\n\n Example::\n\n foo(1)\n\n foo // <- name\n\n \"\"\"\n node = self.visit_Expression(node)\n node.name = self.visit_Identifier(node.name)\n node.arguments = self._visit_list(node.arguments, self.visit)\n return node\n\n def visit_Cast(self, node: ast.Cast, context=None) -> ast.Cast:\n \"\"\"\n A cast call expression\n\n Example::\n\n counts += int[1](b);\n\n \"\"\"\n node = self.visit_Expression(node)\n node.type = self.visit(node.type)\n node.argument = self.visit(node.argument)\n return node\n\n def visit_DiscreteSet(self, node: ast.DiscreteSet, context=None) -> ast.DiscreteSet:\n \"\"\"\n A set of discrete values. This can be used for the values in a ``for``\n loop, or to index certain values out of a register::\n\n for i in {1, 2, 3} {}\n let alias = qubits[{2, 3, 4}];\n \"\"\"\n node.values = self._visit_list(node.values, self.visit)\n return node\n\n def visit_RangeDefinition(\n self, node: ast.RangeDefinition, context=None\n ) -> ast.RangeDefinition:\n \"\"\"\n Range definition.\n\n Example::\n\n 1:2\n 1:1:10\n :\n \"\"\"\n if node.start:\n node.start = self.visit(node.start)\n if node.end:\n node.end = self.visit(node.end)\n if node.step:\n node.step = self.visit(node.step)\n return node\n\n IndexElement = ast.DiscreteSet | list[ast.Expression | ast.RangeDefinition]\n\n def _visit_IndexElement(self, node: IndexElement, context=None) -> IndexElement:\n if isinstance(node, list):\n return self._visit_list(node, self.visit)\n return self.visit(node)\n\n def visit_IndexExpression(\n self, node: ast.IndexExpression, context=None\n ) -> ast.IndexExpression:\n \"\"\"\n An index expression.\n\n Example::\n\n q[1]\n \"\"\"\n node = self.visit_Expression(node)\n node.collection = self.visit(node.collection)\n node.index = self._visit_IndexElement(node.index)\n return node\n\n def visit_IndexedIdentifier(\n self, node: ast.IndexedIdentifier, context=None\n ) -> ast.IndexedIdentifier:\n \"\"\"An indentifier with index operators, such that it can be used as an\n lvalue. The list of indices is subsequent index brackets, so in::\n\n a[{1, 2, 3}][0:1, 0:1]\n\n the list of indices will have two elements. 
The first will be a\n :class:`.DiscreteSet`, and the second will be a list of two\n :class:`.RangeDefinition`\\\\ s.\n \"\"\"\n node.name = self.visit_Identifier(node.name)\n node.indices = self._visit_list(node.indices, self._visit_IndexElement)\n return node\n\n def visit_Concatenation(\n self, node: ast.Concatenation, context=None\n ) -> ast.Concatenation:\n \"\"\"\n Concatenation of two registers, for example::\n\n a ++ b\n a[2:3] ++ a[0:1]\n \"\"\"\n node = self.visit_Expression(node)\n node.lhs = self.visit(node.lhs)\n node.rhs = self.visit(node.rhs)\n return node\n\n def visit_QuantumGate(self, node: ast.QuantumGate, context=None) -> ast.QuantumGate:\n \"\"\"\n Invoking a quantum gate\n\n Example::\n cx[dur] 0, 1;\n\n or\n\n ctrl @ p(λ) a, b;\n\n ctrl @ // <- quantumGateModifier\n p // <- quantumGateName\n λ // <- argument\n a, b // <- qubit\n \"\"\"\n node = self.visit_QuantumStatement(node)\n node.modifiers = self._visit_list(\n node.modifiers, self.visit_QuantumGateModifier\n )\n node.name = self.visit_Identifier(node.name)\n node.arguments = self._visit_list(node.arguments, self.visit)\n node.qubits = self._visit_list(node.qubits, self.visit)\n if node.duration:\n node.duration = self.visit(node.duration)\n return node\n\n def visit_QuantumGateModifier(\n self, node: ast.QuantumGateModifier, context=None\n ) -> ast.QuantumGateModifier:\n \"\"\"\n A quantum gate modifier\n\n Attributes:\n modifier: 'inv', 'pow', or 'ctrl'\n expression: only pow modifier has expression.\n\n Example::\n\n inv @\n pow(1/2)\n ctrl\n \"\"\"\n if node.argument:\n node.argument = self.visit(node.argument)\n return node\n\n def visit_QuantumPhase(\n self, node: ast.QuantumPhase, context=None\n ) -> ast.QuantumPhase:\n \"\"\"\n A quantum phase instruction\n\n Example::\n\n ctrl @ gphase(λ) a;\n\n ctrl @ // <- quantumGateModifier\n λ // <- argument\n a // <- qubit\n\n \"\"\"\n node = self.visit_QuantumStatement(node)\n node.modifiers = self._visit_list(\n node.modifiers, self.visit_QuantumGateModifier\n )\n node.argument = self.visit(node.argument)\n node.qubits = self._visit_list(node.qubits, self.visit)\n return node\n\n # Not a full expression because it can only be used in limited contexts.\n def visit_QuantumMeasurement(\n self, node: ast.QuantumMeasurement, context=None\n ) -> ast.QuantumMeasurement:\n \"\"\"\n A quantum measurement instruction\n\n Example::\n\n measure q;\n \"\"\"\n node.qubit = self.visit(node.qubit)\n return node\n\n # Note that this is not a QuantumStatement because it involves access to\n # classical bits.\n def visit_QuantumMeasurementStatement(\n self, node: ast.QuantumMeasurementStatement, context=None\n ) -> ast.QuantumMeasurementStatement:\n \"\"\"Stand-alone statement of a quantum measurement, potentially assigning the\n result to a classical variable. 
This is not the only statement that\n `measure` can appear in (it can also be in classical declaration statements\n and returns).\"\"\"\n node = self.visit_Statement(node)\n node.measure = self.visit_QuantumMeasurement(node.measure)\n if node.target:\n node.target = self.visit(node.target)\n return node\n\n def visit_QuantumBarrier(\n self, node: ast.QuantumBarrier, context=None\n ) -> ast.QuantumBarrier:\n \"\"\"\n A quantum barrier instruction\n\n Example::\n\n barrier q;\n \"\"\"\n node = self.visit_QuantumStatement(node)\n node.qubits = self._visit_list(node.qubits, self.visit)\n return node\n\n def visit_QuantumReset(\n self, node: ast.QuantumReset, context=None\n ) -> ast.QuantumReset:\n \"\"\"\n A reset instruction.\n\n Example::\n\n reset q;\n \"\"\"\n\n node = self.visit_QuantumStatement(node)\n node.qubits = self.visit(node.qubits)\n return node\n\n def visit_ClassicalArgument(\n self, node: ast.ClassicalArgument, context=None\n ) -> ast.ClassicalArgument:\n \"\"\"\n Classical argument for a gate or subroutine declaration\n \"\"\"\n node.type = self.visit(node.type)\n node.name = self.visit_Identifier(node.name)\n return node\n\n def visit_ExternArgument(\n self, node: ast.ExternArgument, context=None\n ) -> ast.ExternArgument:\n \"\"\"Classical argument for an extern declaration.\"\"\"\n\n node.type = self.visit(node.type)\n return node\n\n def visit_ClassicalDeclaration(\n self, node: ast.ClassicalDeclaration, context=None\n ) -> ast.ClassicalDeclaration:\n \"\"\"\n Classical variable declaration\n\n Example::\n\n bit c;\n \"\"\"\n\n node = self.visit_Statement(node)\n node.type = self.visit(node.type)\n node.identifier = self.visit_Identifier(node.identifier)\n if node.init_expression:\n node.init_expression = self.visit(node.init_expression)\n return node\n\n def visit_IODeclaration(\n self, node: ast.IODeclaration, context=None\n ) -> ast.IODeclaration:\n \"\"\"\n Input/output variable declaration\n\n Exampe::\n\n input angle[16] theta;\n output bit select;\n \"\"\"\n node = self.visit_Statement(node)\n node.type = self.visit(node.type)\n node.identifier = self.visit_Identifier(node.identifier)\n return node\n\n def visit_ConstantDeclaration(\n self, node: ast.ConstantDeclaration, context=None\n ) -> ast.ConstantDeclaration:\n \"\"\"\n Constant declaration\n\n Example::\n\n const int[16] n = 10;\n \"\"\"\n node = self.visit_Statement(node)\n node.type = self.visit(node.type)\n node.identifier = self.visit_Identifier(node.identifier)\n node.init_expression = self.visit(node.init_expression)\n return node\n\n def visit_ClassicalType(\n self, node: ast.ClassicalType, context=None\n ) -> ast.ClassicalType:\n \"\"\"\n Base class for classical type\n \"\"\"\n return node\n\n def visit_IntType(self, node: ast.IntType, context=None) -> ast.IntType:\n \"\"\"\n Node representing a classical ``int`` (signed integer) type, with an\n optional precision.\n\n Example:\n\n int[8]\n int[16]\n \"\"\"\n node = self.visit_ClassicalType(node)\n if node.size:\n node.size = self.visit(node.size)\n return node\n\n def visit_UintType(self, node: ast.UintType, context=None) -> ast.UintType:\n \"\"\"\n Node representing a classical ``uint`` (unsigned integer) type, with an\n optional precision.\n\n Example:\n\n uint[8]\n uint[16]\n \"\"\"\n\n node = self.visit_ClassicalType(node)\n if node.size:\n node.size = self.visit(node.size)\n return node\n\n def visit_FloatType(self, node: ast.FloatType, context=None) -> ast.FloatType:\n \"\"\"\n Node representing the classical ``float`` type, with the 
particular IEEE-754\n floating-point size optionally specified.\n\n Example:\n\n float[16]\n float[64]\n \"\"\"\n node = self.visit_ClassicalType(node)\n if node.size:\n node.size = self.visit(node.size)\n return node\n\n def visit_ComplexType(self, node: ast.ComplexType, context=None) -> ast.ComplexType:\n \"\"\"\n Complex ClassicalType. Its real and imaginary parts are based on other\n classical types.\n\n Example::\n\n complex[float]\n complex[float[32]]\n \"\"\"\n node = self.visit_ClassicalType(node)\n if node.base_type:\n node.base_type = self.visit(node.base_type)\n return node\n\n def visit_AngleType(self, node: ast.AngleType, context=None) -> ast.AngleType:\n \"\"\"\n Node representing the classical ``angle`` type, with an optional precision.\n\n Example::\n\n angle[8]\n angle[16]\n \"\"\"\n node = self.visit_ClassicalType(node)\n if node.size:\n node.size = self.visit(node.size)\n return node\n\n def visit_BitType(self, node: ast.BitType, context=None) -> ast.BitType:\n \"\"\"\n Node representing the classical ``bit`` type, with an optional size.\n\n Example::\n\n bit[8]\n creg[8]\n \"\"\"\n node = self.visit_ClassicalType(node)\n if node.size:\n node.size = self.visit(node.size)\n return node\n\n def visit_BoolType(self, node: ast.BoolType, context=None) -> ast.BoolType:\n \"\"\"\n Leaf node representing the Boolean classical type.\n \"\"\"\n node = self.visit_ClassicalType(node)\n return node\n\n def visit_ArrayType(self, node: ast.ArrayType, context=None) -> ast.ArrayType:\n \"\"\"Type of arrays that include allocation of the storage.\n\n This is generally any array declared as a standard statement, but not\n arrays declared by being arguments to subroutines.\n \"\"\"\n node = self.visit_ClassicalType(node)\n node.base_type = self.visit(node.base_type)\n node.dimensions = self._visit_list(node.dimensions, self.visit)\n return node\n\n def visit_ArrayReferenceType(\n self, node: ast.ArrayReferenceType, context=None\n ) -> ast.ArrayReferenceType:\n \"\"\"Type of arrays that are a reference to an array with allocated storage.\n\n This is generally any array declared as a subroutine argument. 
The\n dimensions can be either a list of expressions (one for each dimension), or\n a single expression, which is the number of dimensions.\n\n For example::\n\n // `a` will have dimensions `[IntegerLiteral(2)]` (with a list), because\n // it is a 1D array, with a length of 2.\n def f(const array[uint[8], 2] a) {}\n // `b` will have dimension `IntegerLiteral(3)` (no list), because it is\n // a 3D array, but we don't know the lengths of its dimensions.\n def f(const array[uint[8], #dim=3] b) {}\n \"\"\"\n\n node = self.visit_ClassicalType(node)\n node.base_type = self.visit(node.base_type)\n node.dimensions = (\n self._visit_list(node.dimensions, self.visit)\n if isinstance(node.dimensions, list)\n else self.visit(node.dimensions)\n )\n return node\n\n def visit_DurationType(\n self, node: ast.DurationType, context=None\n ) -> ast.DurationType:\n \"\"\"\n Leaf node representing the ``duration`` type.\n \"\"\"\n node = self.visit_ClassicalType(node)\n return node\n\n def visit_StretchType(self, node: ast.StretchType, context=None) -> ast.StretchType:\n \"\"\"\n Leaf node representing the ``stretch`` type.\n \"\"\"\n node = self.visit_ClassicalType(node)\n return node\n\n def visit_CalibrationGrammarDeclaration(\n self, node: ast.CalibrationGrammarDeclaration, context=None\n ) -> ast.CalibrationGrammarDeclaration:\n \"\"\"\n Calibration grammar declaration\n\n Example::\n\n defcalgrammar \"openpulse\";\n \"\"\"\n return node\n\n def visit_CalibrationStatement(\n self, node: ast.CalibrationStatement, context=None\n ) -> ast.CalibrationStatement:\n \"\"\"An inline ``cal`` statement for embedded pulse-grammar interactions.\n\n Example::\n\n cal {\n shift_phase(drive($0), theta);\n }\n \"\"\"\n node = self.visit_Statement(node)\n node.body = self._visit_list(node.body, self.visit)\n return node\n\n def visit_CalibrationBlock(\n self, node: ast.CalibrationBlock, context=None\n ) -> ast.CalibrationBlock:\n node.body = self._visit_list(node.body, self.visit)\n return node\n\n def visit_CalibrationDefinition(\n self, node: ast.CalibrationDefinition, context=None\n ) -> ast.CalibrationDefinition:\n \"\"\"\n Calibration definition\n\n Example::\n\n defcal rz(angle[20] theta) q {\n shift_phase drive(q), -theta;\n }\n \"\"\"\n node = self.visit_Statement(node)\n node.name = self.visit_Identifier(node.name)\n node.arguments = self._visit_list(node.arguments, self.visit)\n node.qubits = self._visit_list(node.qubits, self.visit_Identifier)\n node.body = self._visit_list(node.body, self.visit)\n if node.return_type:\n node.return_type = self.visit(node.return_type)\n return node\n\n def visit_SubroutineDefinition(\n self, node: ast.SubroutineDefinition, context=None\n ) -> ast.SubroutineDefinition:\n \"\"\"\n Subroutine definition\n\n Example::\n\n def measure(qubit q, context=None) -> bit {\n s q;\n h q;\n return measure q;\n }\n \"\"\"\n node = self.visit_Statement(node)\n node.name = self.visit_Identifier(node.name)\n node.arguments = self._visit_list(node.arguments, self.visit)\n node.body = self._visit_list(node.body, self.visit)\n if node.return_type:\n node.return_type = self.visit(node.return_type)\n return node\n\n def visit_QuantumArgument(\n self, node: ast.QuantumArgument, context=None\n ) -> ast.QuantumArgument:\n \"\"\"\n Quantum argument for a subroutine declaration\n \"\"\"\n node.name = self.visit_Identifier(node.name)\n if node.size:\n node.size = self.visit(node.size)\n return node\n\n def visit_ReturnStatement(\n self, node: ast.ReturnStatement, context=None\n ) -> ast.ReturnStatement:\n 
\"\"\"\n Classical or quantum return statement\n\n Example::\n\n return measure q;\n\n return a + b\n\n \"\"\"\n node = self.visit_Statement(node)\n if node.expression:\n node.expression = self.visit(node.expression)\n return node\n\n def visit_BreakStatement(\n self, node: ast.BreakStatement, context=None\n ) -> ast.BreakStatement:\n \"\"\"\n Break statement\n\n Example::\n\n break;\n \"\"\"\n node = self.visit_Statement(node)\n return node\n\n def visit_ContinueStatement(\n self, node: ast.ContinueStatement, context=None\n ) -> ast.ContinueStatement:\n \"\"\"\n Continue statement\n\n Example::\n\n continue;\n \"\"\"\n node = self.visit_Statement(node)\n return node\n\n def visit_EndStatement(\n self, node: ast.EndStatement, context=None\n ) -> ast.EndStatement:\n \"\"\"\n End statement\n\n Example::\n\n end;\n \"\"\"\n node = self.visit_Statement(node)\n return node\n\n def visit_BranchingStatement(\n self, node: ast.BranchingStatement, context=None\n ) -> ast.Statement:\n \"\"\"\n Branch (``if``) statement\n\n Example::\n\n if (temp == 1) {\n ry(-pi / 2) scratch[0];\n } else continue;\n \"\"\"\n node = self.visit_Statement(node)\n node.condition = self.visit(node.condition)\n node.if_block = self._visit_list(node.if_block, self.visit)\n node.else_block = self._visit_list(node.else_block, self.visit)\n return node\n\n def visit_WhileLoop(self, node: ast.WhileLoop, context=None) -> ast.WhileLoop:\n \"\"\"\n While loop\n\n Example::\n\n while(~success) {\n reset magic;\n ry(pi / 4) magic;\n success = distill(magic, scratch);\n }\n \"\"\"\n node = self.visit_Statement(node)\n node.while_condition = self.visit(node.while_condition)\n node.block = self._visit_list(node.block, self.visit)\n return node\n\n def visit_ForInLoop(self, node: ast.ForInLoop, context=None) -> ast.ForInLoop:\n \"\"\"\n For in loop\n\n Example::\n\n for i in [0: 2] {\n majority a[i], b[i + 1], a[i + 1];\n }\n \"\"\"\n node = self.visit_Statement(node)\n node.type = self.visit(node.type)\n node.identifier = self.visit_Identifier(node.identifier)\n node.set_declaration = self.visit(node.set_declaration)\n node.block = self._visit_list(node.block, self.visit)\n return node\n\n def visit_DelayInstruction(\n self, node: ast.DelayInstruction, context=None\n ) -> ast.DelayInstruction:\n \"\"\"\n Delay instruction\n\n Example::\n\n delay[start_stretch] $0;\n \"\"\"\n node = self.visit_QuantumStatement(node)\n node.duration = self.visit(node.duration)\n node.qubits = self._visit_list(node.qubits, self.visit)\n return node\n\n def visit_Box(self, node: ast.Box, context=None) -> ast.Box:\n \"\"\"\n Timing box\n\n Example::\n\n box [maxdur] {\n delay[start_stretch] $0;\n x $0;\n }\n \"\"\"\n node = self.visit_QuantumStatement(node)\n node.duration = self.visit(node.duration) if node.duration else None\n node.body = self._visit_list(node.body, self.visit)\n return node\n\n def visit_DurationOf(self, node: ast.DurationOf, context=None) -> ast.DurationOf:\n \"\"\"\n Duration Of\n\n Example::\n\n durationof({x $0;})\n \"\"\"\n node = self.visit_Expression(node)\n node.target = self._visit_list(node.target, self.visit)\n return node\n\n def visit_SizeOf(self, node: ast.SizeOf, context=None) -> ast.SizeOf:\n \"\"\"``sizeof`` an array's dimensions.\"\"\"\n node = self.visit_Expression(node)\n node.target = self.visit(node.target)\n if node.index:\n node.index = self.visit(node.index)\n return node\n\n def visit_AliasStatement(\n self, node: ast.AliasStatement, context=None\n ) -> ast.AliasStatement:\n \"\"\"\n Alias statement\n\n 
Example::\n\n let a = qubits[0];\n\n \"\"\"\n node = self.visit_Statement(node)\n node.target = self.visit_Identifier(node.target)\n node.value = self.visit(node.value)\n return node\n\n def visit_ClassicalAssignment(\n self, node: ast.ClassicalAssignment, context=None\n ) -> ast.ClassicalAssignment:\n \"\"\"\n Classical assignment\n\n Example::\n\n a[0] = 1;\n \"\"\"\n node = self.visit_Statement(node)\n node.lvalue = self.visit(node.lvalue)\n node.rvalue = self.visit(node.rvalue)\n return node\n\n def visit_Pragma(self, node: ast.Pragma, context=None) -> ast.Pragma:\n \"\"\"\n Pragma\n Example::\n\n #pragma val1 val2 val3\n \"\"\"\n return node\n\n def visit_WaveformType(\n self, node: ast.WaveformType, context=None\n ) -> ast.WaveformType:\n node = self.visit_ClassicalType(node)\n return node\n\n def visit_PortType(self, node: ast.PortType, context=None) -> ast.PortType:\n node = self.visit_ClassicalType(node)\n return node\n\n def visit_FrameType(self, node: ast.FrameType, context=None) -> ast.FrameType:\n node = self.visit_ClassicalType(node)\n return node"
}
] | from openpulse import ast
from openpulse.printer import dumps as qasm_dumps
from zhinst.toolkit import CommandTable
from ..logger import LOGGER
from ..utilities import LazyRepr
from ..visitors import GenericTransformer as QASMTransformer | 8,474 |
class InsertCTWaveforms(QASMTransformer):
"""
QASMTransformer to add in assignWaveIndex(placeholder(length), index) statements
for each waveform in the command table
    Args:
        commandtable (CommandTable | None):
            ZI CommandTable object that provides the waveform indices and
            lengths (if None, the program is left unchanged)
    """
def __init__(self, commandtable: CommandTable | None) -> None:
self.ct = commandtable or {}
@staticmethod
def add_assignWaveIndex(
        waveform_set: set[tuple[int, int]]
) -> ast.CalibrationStatement:
"""
        Create a list of openQASM statements of
assignWaveIndex(placeholder(length), index) for each waveform in the
waveform_set
Args:
            waveform_set (set[tuple[int, int]]):
A set of tuples of waveform index and length
Returns:
list[ast.Statement]:
A list of QASM statements
"""
        awi_statements = [
ast.FunctionCall(
name=ast.Identifier("assignWaveIndex"),
arguments=[
ast.FunctionCall(
name=ast.Identifier("placeholder"),
arguments=[ast.IntegerLiteral(length)],
),
ast.IntegerLiteral(index),
],
)
for (index, length) in waveform_set
]
return ast.CalibrationStatement(
            body=[ast.ExpressionStatement(awi) for awi in awi_statements]
)
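    # Illustrative note (added for clarity, not part of the original file): for a
    # waveform_set such as {(0, 128)}, the returned CalibrationStatement roughly
    # serialises (e.g. via qasm_dumps) to:
    #   cal {
    #     assignWaveIndex(placeholder(128), 0);
    #   }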
# pylint: disable=C0103
# snake_case naming
def visit_Program(self, node: ast.Program):
"""
Program node transformer:
        inserts assignWaveIndex and placeholder statements at the beginning
of the program
Args:
node (ast.Program): openQASM program to process
Returns:
ast.Program: same node with waveform declarations inserted
"""
if self.ct:
i = 0
waveform_set = set()
while (
self.ct.table[i].waveform.index is not None
and self.ct.table[i].waveform.length is not None
):
                # iterating directly over the command table items produced indices
                # outside the bounds of the json schema, so a for loop / list
                # comprehension could not be used here
waveform_set.add(
(self.ct.table[i].waveform.index, self.ct.table[i].waveform.length)
)
i += 1
node.statements.insert(1, self.add_assignWaveIndex(waveform_set))
|
class InsertCTWaveforms(QASMTransformer):
"""
QASMTransformer to add in assignWaveIndex(placeholder(length), index) statements
for each waveform in the command table
    Args:
        commandtable (CommandTable | None):
            ZI CommandTable object that provides the waveform indices and
            lengths (if None, the program is left unchanged)
    """
def __init__(self, commandtable: CommandTable | None) -> None:
self.ct = commandtable or {}
@staticmethod
def add_assignWaveIndex(
        waveform_set: set[tuple[int, int]]
) -> ast.CalibrationStatement:
"""
        Create a list of openQASM statements of
assignWaveIndex(placeholder(length), index) for each waveform in the
waveform_set
Args:
            waveform_set (set[tuple[int, int]]):
A set of tuples of waveform index and length
Returns:
list[ast.Statement]:
A list of QASM statements
"""
        awi_statements = [
ast.FunctionCall(
name=ast.Identifier("assignWaveIndex"),
arguments=[
ast.FunctionCall(
name=ast.Identifier("placeholder"),
arguments=[ast.IntegerLiteral(length)],
),
ast.IntegerLiteral(index),
],
)
for (index, length) in waveform_set
]
return ast.CalibrationStatement(
            body=[ast.ExpressionStatement(awi) for awi in awi_statements]
)
# pylint: disable=C0103
# snake_case naming
def visit_Program(self, node: ast.Program):
"""
Program node transformer:
        inserts assignWaveIndex and placeholder statements at the beginning
of the program
Args:
node (ast.Program): openQASM program to process
Returns:
ast.Program: same node with waveform declarations inserted
"""
if self.ct:
i = 0
waveform_set = set()
while (
self.ct.table[i].waveform.index is not None
and self.ct.table[i].waveform.length is not None
):
                # iterating directly over the command table items produced indices
                # outside the bounds of the json schema, so a for loop / list
                # comprehension could not be used here
waveform_set.add(
(self.ct.table[i].waveform.index, self.ct.table[i].waveform.length)
)
i += 1
node.statements.insert(1, self.add_assignWaveIndex(waveform_set)) | LOGGER.debug("\n%s", LazyRepr(qasm_dumps, [node])) | 0 | 2023-11-16 17:37:29+00:00 | 12k |
KevinXu02/ControlledDreamGaussian | frankmocap/bodymocap/body_mocap_api.py | [
{
"identifier": "hmr",
"path": "frankmocap/bodymocap/models/hmr.py",
"snippet": "def hmr(smpl_mean_params, pretrained=True, **kwargs):\n \"\"\" Constructs an HMR model with ResNet50 backbone.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = HMR(Bottleneck, [3, 4, 6, 3], smpl_mean_params, **kwargs)\n if pretrained:\n resnet_imagenet = resnet.resnet50(pretrained=True)\n model.load_state_dict(resnet_imagenet.state_dict(),strict=False)\n return model"
},
{
"identifier": "SMPL",
"path": "frankmocap/bodymocap/models/smpl.py",
"snippet": "class SMPL(_SMPL):\n \"\"\" Extension of the official SMPL implementation to support more joints \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SMPL, self).__init__(*args, **kwargs)\n joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]\n JOINT_REGRESSOR_TRAIN_EXTRA = './frankmocap/extra_data/body_module/data_from_spin//J_regressor_extra.npy'\n J_regressor_extra = np.load(JOINT_REGRESSOR_TRAIN_EXTRA)\n self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))\n self.joint_map = torch.tensor(joints, dtype=torch.long)\n\n def forward(self, *args, **kwargs):\n kwargs['get_skin'] = True\n smpl_output = super(SMPL, self).forward(*args, **kwargs)\n extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices) #Additional 9 joints #Check doc/J_regressor_extra.png\n joints = torch.cat([smpl_output.joints, extra_joints], dim=1) #[N, 24 + 21, 3] + [N, 9, 3]\n joints = joints[:, self.joint_map, :]\n output = ModelOutput(vertices=smpl_output.vertices,\n global_orient=smpl_output.global_orient,\n body_pose=smpl_output.body_pose,\n joints=joints,\n betas=smpl_output.betas,\n full_pose=smpl_output.full_pose)\n return output"
},
{
"identifier": "SMPLX",
"path": "frankmocap/bodymocap/models/smpl.py",
"snippet": "class SMPLX(_SMPLX):\n \"\"\" Extension of the official SMPL implementation to support more joints \"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs['ext'] = 'pkl' #We have pkl file\n super(SMPLX, self).__init__(*args, **kwargs)\n joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]\n JOINT_REGRESSOR_TRAIN_EXTRA_SMPLX = 'extra_data/body_module/J_regressor_extra_smplx.npy'\n J_regressor_extra = np.load(JOINT_REGRESSOR_TRAIN_EXTRA_SMPLX) #(9, 10475)\n self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))\n self.joint_map = torch.tensor(joints, dtype=torch.long)\n\n def forward(self, *args, **kwargs):\n kwargs['get_skin'] = True\n\n #if pose parameter is for SMPL with 21 joints (ignoring root)\n if(kwargs['body_pose'].shape[1]==69):\n kwargs['body_pose'] = kwargs['body_pose'][:,:-2*3] #Ignore the last two joints (which are on the palm. Not used)\n\n if(kwargs['body_pose'].shape[1]==23):\n kwargs['body_pose'] = kwargs['body_pose'][:,:-2] #Ignore the last two joints (which are on the palm. Not used)\n\n smpl_output = super(SMPLX, self).forward(*args, **kwargs)\n extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)\n # extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices[:,:6890]) *0 #TODO: implement this correctly\n\n #SMPL-X Joint order: https://docs.google.com/spreadsheets/d/1_1dLdaX-sbMkCKr_JzJW_RZCpwBwd7rcKkWT_VgAQ_0/edit#gid=0\n smplx_to_smpl = list(range(0,22)) + [28,43] + list(range(55,76)) # 28 left middle finger , 43: right middle finger 1\n smpl_joints = smpl_output.joints[:,smplx_to_smpl,:] # Convert SMPL-X to SMPL 127 ->45\n joints = torch.cat([smpl_joints, extra_joints], dim=1) # [N, 127, 3]->[N, 45, 3] + [N, 9, 3] # SMPL-X has more joints. should convert 45\n joints = joints[:, self.joint_map, :] \n\n # Hand joints\n smplx_hand_to_panoptic = [0, 13,14,15,16, 1,2,3,17, 4,5,6,18, 10,11,12,19, 7,8,9,20] #Wrist Thumb to Pinky\n\n smplx_lhand = [20] + list(range(25,40)) + list(range(66, 71)) #20 for left wrist. 20 finger joints\n lhand_joints = smpl_output.joints[:,smplx_lhand, :] #(N,21,3)\n lhand_joints = lhand_joints[:, smplx_hand_to_panoptic, :] #Convert SMPL-X hand order to paonptic hand order\n\n smplx_rhand = [21] + list(range(40,55)) + list(range(71, 76)) #21 for right wrist. 20 finger joints\n rhand_joints = smpl_output.joints[:, smplx_rhand, :] #(N,21,3)\n rhand_joints = rhand_joints[:,smplx_hand_to_panoptic,:] #Convert SMPL-X hand order to paonptic hand order\n\n output = ModelOutput(vertices=smpl_output.vertices,\n global_orient=smpl_output.global_orient,\n body_pose=smpl_output.body_pose,\n joints=joints,\n right_hand_joints=rhand_joints, #N,21,3\n left_hand_joints=lhand_joints, #N,21,3\n betas=smpl_output.betas,\n full_pose=smpl_output.full_pose)\n return output"
},
{
"identifier": "constants",
"path": "frankmocap/bodymocap/constants.py",
"snippet": "FOCAL_LENGTH = 5000.\nIMG_RES = 224\nIMG_NORM_MEAN = [0.485, 0.456, 0.406]\nIMG_NORM_STD = [0.229, 0.224, 0.225]\nJOINT_NAMES = [\n'OP Nose', 'OP Neck', 'OP RShoulder', #0,1,2\n'OP RElbow', 'OP RWrist', 'OP LShoulder', #3,4,5\n'OP LElbow', 'OP LWrist', 'OP MidHip', #6, 7,8\n'OP RHip', 'OP RKnee', 'OP RAnkle', #9,10,11\n'OP LHip', 'OP LKnee', 'OP LAnkle', #12,13,14\n'OP REye', 'OP LEye', 'OP REar', #15,16,17\n'OP LEar', 'OP LBigToe', 'OP LSmallToe', #18,19,20\n'OP LHeel', 'OP RBigToe', 'OP RSmallToe', 'OP RHeel', #21, 22, 23, 24 ##Total 25 joints for openpose\n'Right Ankle', 'Right Knee', 'Right Hip', #0,1,2\n'Left Hip', 'Left Knee', 'Left Ankle', #3, 4, 5\n'Right Wrist', 'Right Elbow', 'Right Shoulder', #6\n'Left Shoulder', 'Left Elbow', 'Left Wrist', #9\n'Neck (LSP)', 'Top of Head (LSP)', #12, 13\n'Pelvis (MPII)', 'Thorax (MPII)', #14, 15\n'Spine (H36M)', 'Jaw (H36M)', #16, 17\n'Head (H36M)', 'Nose', 'Left Eye', #18, 19, 20\n'Right Eye', 'Left Ear', 'Right Ear' #21,22,23 (Total 24 joints)\n]\nJOINT_IDS = {JOINT_NAMES[i]: i for i in range(len(JOINT_NAMES))}\nJOINT_MAP = {\n'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17,\n'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16,\n'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0,\n'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8,\n'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7,\n'OP REye': 25, 'OP LEye': 26, 'OP REar': 27,\n'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30,\n'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34,\n'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45,\n'Left Hip': 46, 'Left Knee': 4, 'Left Ankle': 7,\n'Right Wrist': 21, 'Right Elbow': 19, 'Right Shoulder': 17,\n'Left Shoulder': 16, 'Left Elbow': 18, 'Left Wrist': 20,\n'Neck (LSP)': 47, 'Top of Head (LSP)': 48,\n'Pelvis (MPII)': 49, 'Thorax (MPII)': 50,\n'Spine (H36M)': 51, 'Jaw (H36M)': 52,\n'Head (H36M)': 53, 'Nose': 24, 'Left Eye': 26,\n'Right Eye': 25, 'Left Ear': 28, 'Right Ear': 27\n}\nH36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]\nH36M_TO_J14 = H36M_TO_J17[:14]\nJ24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]\nJ24_TO_J14 = J24_TO_J17[:14]\nSMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]\nSMPL_POSE_FLIP_PERM = []\nJ24_FLIP_PERM = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]\nJ49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\\\n + [25+i for i in J24_FLIP_PERM]"
},
{
"identifier": "crop",
"path": "frankmocap/bodymocap/utils/imutils.py",
"snippet": "def crop(img, center, scale, res, rot=0):\n \"\"\"Crop image according to the supplied bounding box.\"\"\"\n # Upper left point\n ul = np.array(transform([1, 1], center, scale, res, invert=1))-1\n # Bottom right point\n br = np.array(transform([res[0]+1,\n res[1]+1], center, scale, res, invert=1))-1\n\n # Padding so that when rotated proper amount of context is included\n pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)\n if not rot == 0:\n ul -= pad\n br += pad\n\n new_shape = [br[1] - ul[1], br[0] - ul[0]]\n if new_shape[0]>15000 or new_shape[1]>15000:\n print(\"Image Size Too Big! scale{}, new_shape{} br{}, ul{}\".format(scale, new_shape, br, ul))\n return None\n\n\n if len(img.shape) > 2:\n new_shape += [img.shape[2]]\n\n\n new_img = np.zeros(new_shape, dtype=np.uint8)\n\n # #Compute bbox for Han's format\n # bboxScale_o2n = 224/new_img.shape[0]\n # bboxTopLeft = ul *bboxScale_o2n\n\n\n # Range to fill new array\n new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]\n new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]\n # Range to sample from original image\n old_x = max(0, ul[0]), min(len(img[0]), br[0])\n old_y = max(0, ul[1]), min(len(img), br[1])\n # print(\"{} vs {} || {} vs {}\".format(new_y[1] - new_y[0] , old_y[1] - old_y[0], new_x[1] - new_x[0], old_x[1] -old_x[0] ) )\n if new_y[1] - new_y[0] != old_y[1] - old_y[0] or new_x[1] - new_x[0] != old_x[1] -old_x[0] or new_y[1] - new_y[0] <0 or new_x[1] - new_x[0] <0:\n print(\"Warning: maybe person is out of image boundary!\")\n return None\n new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1],\n old_x[0]:old_x[1]]\n\n if not rot == 0:\n # Remove padding\n new_img = scipy.misc.imrotate(new_img, rot)\n new_img = new_img[pad:-pad, pad:-pad]\n\n new_img = cv2.resize(new_img, tuple(res))\n # new_img = scipy.misc.imresize(new_img, res) #Need this to get the same number with the old model (trained with this resize)\n\n return new_img#, bboxScale_o2n, bboxTopLeft"
},
{
"identifier": "crop_bboxInfo",
"path": "frankmocap/bodymocap/utils/imutils.py",
"snippet": "def crop_bboxInfo(img, center, scale, res =(224,224)):\n \"\"\"Crop image according to the supplied bounding box.\"\"\"\n # Upper left point\n ul = np.array(transform([1, 1], center, scale, res, invert=1))-1\n # Bottom right point\n br = np.array(transform([res[0]+1,\n res[1]+1], center, scale, res, invert=1))-1\n\n\n # Padding so that when rotated proper amount of context is included\n pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)\n\n new_shape = [br[1] - ul[1], br[0] - ul[0]]\n if len(img.shape) > 2:\n new_shape += [img.shape[2]]\n # new_img = np.zeros(new_shape)\n if new_shape[0] <1 or new_shape[1] <1:\n return None, None, None\n new_img = np.zeros(new_shape, dtype=np.uint8)\n\n if new_img.shape[0] ==0:\n return None, None, None\n\n #Compute bbox for Han's format\n bboxScale_o2n = res[0]/new_img.shape[0] #224/ 531\n\n # Range to fill new array\n new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]\n new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]\n # Range to sample from original image\n old_x = max(0, ul[0]), min(len(img[0]), br[0])\n old_y = max(0, ul[1]), min(len(img), br[1])\n\n if new_y[0] <0 or new_y[1]<0 or new_x[0] <0 or new_x[1]<0 :\n return None, None, None\n\n new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1],\n old_x[0]:old_x[1]]\n\n bboxTopLeft_inOriginal = (ul[0], ul[1] )\n\n if new_img.shape[0] <20 or new_img.shape[1]<20:\n return None, None, None\n # print(bboxTopLeft_inOriginal)\n # from renderer import viewer2D\n # viewer2D.ImShow(new_img.astype(np.uint8),name='cropped')\n\n new_img = cv2.resize(new_img, res)\n\n # viewer2D.ImShow(new_img.astype(np.uint8),name='original')\n\n return new_img, bboxScale_o2n, np.array(bboxTopLeft_inOriginal)"
},
{
"identifier": "process_image_bbox",
"path": "frankmocap/bodymocap/utils/imutils.py",
"snippet": "def process_image_bbox(img_original, bbox_XYWH, input_res=224):\n \"\"\"Read image, do preprocessing and possibly crop it according to the bounding box.\n If there are bounding box annotations, use them to crop the image.\n If no bounding box is specified but openpose detections are available, use them to get the bounding box.\n \"\"\"\n normalize_img = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)\n img_original = img_original[:,:,::-1].copy() # PyTorch does not support negative stride at the moment\n img = img_original.copy()\n\n center, scale = bbox_from_bbr(bbox_XYWH, imageHeight = img.shape[0])\n if center is None:\n return None, None, None, None, None\n\n img, boxScale_o2n, bboxTopLeft = crop_bboxInfo(img, center, scale, (input_res, input_res))\n\n # viewer2D.ImShow(img, name='cropped', waitTime=1) #224,224,3\n\n if img is None:\n return None, None, None, None, None\n\n\n # unCropped = uncrop(img, center, scale, (input_res, input_res))\n\n # if True:\n # viewer2D.ImShow(img)\n norm_img = (img.copy()).astype(np.float32) / 255.\n norm_img = torch.from_numpy(norm_img).permute(2,0,1)\n norm_img = normalize_img(norm_img.clone())[None]\n\n bboxInfo ={\"center\": center, \"scale\": scale, \"bboxXYWH\":bbox_XYWH}\n return img, norm_img, boxScale_o2n, bboxTopLeft, bboxInfo"
},
{
"identifier": "process_image_keypoints",
"path": "frankmocap/bodymocap/utils/imutils.py",
"snippet": "def process_image_keypoints(img, keypoints, input_res=224):\n \"\"\"Read image, do preprocessing and possibly crop it according to the bounding box.\n If there are bounding box annotations, use them to crop the image.\n If no bounding box is specified but openpose detections are available, use them to get the bounding box.\n \"\"\"\n normalize_img = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)\n img = img[:,:,::-1].copy() # PyTorch does not support negative stride at the moment\n\n center, scale, bbox = bbox_from_keypoints(keypoints, imageHeight = img.shape[0])\n if center is None:\n return None, None, None, None, None\n\n img, boxScale_o2n, bboxTopLeft = crop_bboxInfo(img, center, scale, (input_res, input_res))\n\n # viewer2D.ImShow(img, name='cropped', waitTime=1) #224,224,3\n\n\n if img is None:\n return None, None, None, None, None\n\n\n # unCropped = uncrop(img, center, scale, (input_res, input_res))\n\n # if True:\n # viewer2D.ImShow(img)\n img = img.astype(np.float32) / 255.\n img = torch.from_numpy(img).permute(2,0,1)\n norm_img = normalize_img(img.clone())[None]\n # return img, norm_img, img_original, boxScale_o2n, bboxTopLeft, bbox\n bboxInfo ={\"center\": center, \"scale\": scale, \"bboxXYWH\":bbox}\n return img, norm_img, boxScale_o2n, bboxTopLeft, bboxInfo"
},
{
"identifier": "bbox_from_keypoints",
"path": "frankmocap/bodymocap/utils/imutils.py",
"snippet": "def bbox_from_keypoints(keypoints, rescale=1.2, detection_thresh=0.2, imageHeight= None):\n \"\"\"Get center and scale for bounding box from openpose detections.\"\"\"\n # with open(openpose_file, 'r') as f:\n # data = json.load(f)\n # if 'people' not in data or len(data['people'])==0:\n # return None, None\n # # keypoints = json.load(f)['people'][0]['pose_keypoints_2d']\n # keypoints = data['people'][0]['pose_keypoints_2d']\n keypoints = np.reshape(np.array(keypoints), (-1,3))\n valid = keypoints[:,-1] > detection_thresh\n\n # if g_debugUpperBodyOnly: #Intentionally remove lower bodies\n # valid[ [ 9,10,11,12,13,14, 22,23,24, 19,20,21] ] = False\n\n valid_keypoints = keypoints[valid][:,:-1] #(25,2)\n\n if len(valid_keypoints)<2:\n return None, None, None\n\n\n if False: #Should have all limbs and nose\n if np.sum(valid[ [ 2,3,4, 5,6,7, 9,10, 12,13,1,0] ]) <12:\n return None, None, None\n\n min_pt = np.min(valid_keypoints, axis=0)\n max_pt = np.max(valid_keypoints, axis=0)\n\n \n bbox= [ min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]\n\n\n\n # print(valid_keypoints)\n # print(valid)\n print(bbox)\n\n if imageHeight is not None:\n\n if valid[10]==False and valid[13] == False: # No knees ub ioeb\n max_pt[1] = min(max_pt[1] + (max_pt[1]- min_pt[1]), imageHeight )\n bbox= [ min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]\n valid_keypoints = np.vstack( (valid_keypoints, np.array(max_pt)) )\n\n\n elif valid[11]==False and valid[14] == False: #No foot\n max_pt[1] = min(max_pt[1] + (max_pt[1]- min_pt[1])*0.2, imageHeight )\n bbox= [ min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]\n\n valid_keypoints = np.vstack( (valid_keypoints, np.array(max_pt)) )\n\n\n center = valid_keypoints.mean(axis=0)\n bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()\n # adjust bounding box tightness\n scale = bbox_size / 200.0\n scale *= rescale\n return center, scale, bbox"
},
{
"identifier": "convert_smpl_to_bbox",
"path": "frankmocap/mocap_utils/coordconv.py",
"snippet": "def convert_smpl_to_bbox(data3D, scale, trans, bAppTransFirst=False):\n data3D = data3D.copy()\n resnet_input_size_half = 224 *0.5\n if bAppTransFirst: # Hand model\n data3D[:,0:2] += trans\n data3D *= scale # apply scaling\n else:\n data3D *= scale # apply scaling\n data3D[:,0:2] += trans\n \n data3D*= resnet_input_size_half # 112 is originated from hrm's input size (224,24)\n # data3D[:,:2]*= resnet_input_size_half # 112 is originated from hrm's input size (224,24)\n return data3D"
},
{
"identifier": "convert_bbox_to_oriIm",
"path": "frankmocap/mocap_utils/coordconv.py",
"snippet": "def convert_bbox_to_oriIm(data3D, boxScale_o2n, bboxTopLeft, imgSizeW, imgSizeH):\n data3D = data3D.copy()\n resnet_input_size_half = 224 *0.5\n imgSize = np.array([imgSizeW,imgSizeH])\n\n data3D /= boxScale_o2n\n\n if not isinstance(bboxTopLeft, np.ndarray):\n assert isinstance(bboxTopLeft, tuple)\n assert len(bboxTopLeft) == 2\n bboxTopLeft = np.array(bboxTopLeft)\n\n data3D[:,:2] += (bboxTopLeft + resnet_input_size_half/boxScale_o2n)\n\n return data3D"
}
] | import cv2
import sys
import torch
import numpy as np
import pickle
import frankmocap.mocap_utils.geometry_utils as gu
from torchvision.transforms import Normalize
from frankmocap.bodymocap.models import hmr, SMPL, SMPLX
from frankmocap.bodymocap import constants
from frankmocap.bodymocap.utils.imutils import crop, crop_bboxInfo, process_image_bbox, process_image_keypoints, \
bbox_from_keypoints
from frankmocap.mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm | 7,461 | # Copyright (c) Facebook, Inc. and its affiliates.
class BodyMocap(object):
def __init__(self, regressor_checkpoint, smpl_dir, device=torch.device('cuda'), use_smplx=False):
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Load parametric model (SMPLX or SMPL)
if use_smplx:
smplModelPath = smpl_dir + '/SMPLX_NEUTRAL.pkl'
self.smpl = SMPLX(smpl_dir,
batch_size=1,
num_betas=10,
use_pca=False,
create_transl=False).to(self.device)
self.use_smplx = True
else:
smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
self.smpl = SMPL(smplModelPath, batch_size=1, create_transl=False).to(self.device)
self.use_smplx = False
# Load pre-trained neural network
SMPL_MEAN_PARAMS = './frankmocap/extra_data/body_module/data_from_spin/smpl_mean_params.npz'
self.model_regressor = hmr(SMPL_MEAN_PARAMS).to(self.device)
checkpoint = torch.load(regressor_checkpoint)
self.model_regressor.load_state_dict(checkpoint['model'], strict=False)
self.model_regressor.eval()
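    # Illustrative usage sketch (added for clarity, not part of the original file;
    # the paths and bbox values below are hypothetical placeholders):
    #   body_mocap = BodyMocap(regressor_checkpoint='<path/to/body_regressor.pt>',
    #                          smpl_dir='<path/to/smpl_models>', use_smplx=False)
    #   img = cv2.imread('<person.jpg>')  # BGR image, as expected by regress()
    #   pred_output_list = body_mocap.regress(img, body_bbox_list=[[x, y, w, h]])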
def regress(self, img_original, body_bbox_list):
"""
args:
img_original: original raw image (BGR order by using cv2.imread)
body_bbox: bounding box around the target: (minX, minY, width, height)
outputs:
pred_vertices_img:
pred_joints_vis_img:
pred_rotmat
pred_betas
pred_camera
bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
bboxTopLeft: bbox top left (redundant)
boxScale_o2n: bbox scaling factor (redundant)
"""
pred_output_list = list()
for body_bbox in body_bbox_list:
img, norm_img, boxScale_o2n, bboxTopLeft, bbox = process_image_bbox(
img_original, body_bbox, input_res=constants.IMG_RES)
bboxTopLeft = np.array(bboxTopLeft)
# bboxTopLeft = bbox['bboxXYWH'][:2]
if img is None:
pred_output_list.append(None)
continue
with torch.no_grad():
# model forward
pred_rotmat, pred_betas, pred_camera = self.model_regressor(norm_img.to(self.device))
# Convert rot_mat to aa since hands are always in aa
# pred_aa = rotmat3x3_to_angle_axis(pred_rotmat)
                pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).to(self.device)
pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
# remove global rotation
pred_aa[:, :3] = 0
smpl_output = self.smpl(
betas=pred_betas,
body_pose=pred_aa[:, 3:],
global_orient=pred_aa[:, :3],
pose2rot=True)
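                # Note (added for clarity, not in the original file): pose2rot=True
                # presumably tells the SMPL layer that the pose inputs are axis-angle
                # vectors to be converted to rotation matrices internally.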
pred_vertices = smpl_output.vertices
pred_joints_3d = smpl_output.joints
pred_vertices = pred_vertices[0].cpu().numpy()
pred_camera = pred_camera.cpu().numpy().ravel()
camScale = pred_camera[0] # *1.15
camTrans = pred_camera[1:]
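                # Note (added for clarity, not in the original file): pred_camera is a
                # weak-perspective camera [s, tx, ty]; convert_smpl_to_bbox scales the
                # vertices by s, shifts x/y by (tx, ty) and multiplies by 112 (half of
                # the 224 crop) to obtain bbox-crop pixel coordinates.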
pred_output = dict()
# Convert mesh to original image space (X,Y are aligned to image)
# 1. SMPL -> 2D bbox
# 2. 2D bbox -> original 2D image
pred_vertices_bbox = convert_smpl_to_bbox(pred_vertices, camScale, camTrans)
| # Copyright (c) Facebook, Inc. and its affiliates.
class BodyMocap(object):
def __init__(self, regressor_checkpoint, smpl_dir, device=torch.device('cuda'), use_smplx=False):
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Load parametric model (SMPLX or SMPL)
if use_smplx:
smplModelPath = smpl_dir + '/SMPLX_NEUTRAL.pkl'
self.smpl = SMPLX(smpl_dir,
batch_size=1,
num_betas=10,
use_pca=False,
create_transl=False).to(self.device)
self.use_smplx = True
else:
smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
self.smpl = SMPL(smplModelPath, batch_size=1, create_transl=False).to(self.device)
self.use_smplx = False
# Load pre-trained neural network
SMPL_MEAN_PARAMS = './frankmocap/extra_data/body_module/data_from_spin/smpl_mean_params.npz'
self.model_regressor = hmr(SMPL_MEAN_PARAMS).to(self.device)
checkpoint = torch.load(regressor_checkpoint)
self.model_regressor.load_state_dict(checkpoint['model'], strict=False)
self.model_regressor.eval()
def regress(self, img_original, body_bbox_list):
"""
args:
img_original: original raw image (BGR order by using cv2.imread)
body_bbox: bounding box around the target: (minX, minY, width, height)
outputs:
pred_vertices_img:
pred_joints_vis_img:
pred_rotmat
pred_betas
pred_camera
bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
bboxTopLeft: bbox top left (redundant)
boxScale_o2n: bbox scaling factor (redundant)
"""
pred_output_list = list()
for body_bbox in body_bbox_list:
img, norm_img, boxScale_o2n, bboxTopLeft, bbox = process_image_bbox(
img_original, body_bbox, input_res=constants.IMG_RES)
bboxTopLeft = np.array(bboxTopLeft)
# bboxTopLeft = bbox['bboxXYWH'][:2]
if img is None:
pred_output_list.append(None)
continue
with torch.no_grad():
# model forward
pred_rotmat, pred_betas, pred_camera = self.model_regressor(norm_img.to(self.device))
# Convert rot_mat to aa since hands are always in aa
# pred_aa = rotmat3x3_to_angle_axis(pred_rotmat)
                pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).to(self.device)
pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
# remove global rotation
pred_aa[:, :3] = 0
smpl_output = self.smpl(
betas=pred_betas,
body_pose=pred_aa[:, 3:],
global_orient=pred_aa[:, :3],
pose2rot=True)
pred_vertices = smpl_output.vertices
pred_joints_3d = smpl_output.joints
pred_vertices = pred_vertices[0].cpu().numpy()
pred_camera = pred_camera.cpu().numpy().ravel()
camScale = pred_camera[0] # *1.15
camTrans = pred_camera[1:]
pred_output = dict()
# Convert mesh to original image space (X,Y are aligned to image)
# 1. SMPL -> 2D bbox
# 2. 2D bbox -> original 2D image
pred_vertices_bbox = convert_smpl_to_bbox(pred_vertices, camScale, camTrans) | pred_vertices_img = convert_bbox_to_oriIm( | 10 | 2023-11-17 05:21:26+00:00 | 12k |
dazhangyu123/OCL | train_source.py | [
{
"identifier": "Eval",
"path": "utils/eval.py",
"snippet": "class Eval():\n def __init__(self, num_class):\n self.num_class = num_class\n self.confusion_matrix = np.zeros((self.num_class,)*2)\n self.ignore_index = None\n self.synthia = True if num_class == 16 else False\n\n\n def Pixel_Accuracy(self):\n if np.sum(self.confusion_matrix) == 0:\n print(\"Attention: pixel_total is zero!!!\")\n PA = 0\n else:\n PA = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()\n\n return PA\n\n def Mean_Pixel_Accuracy(self, out_16_13=False):\n MPA = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)\n if self.synthia:\n MPA_16 = np.nanmean(MPA[:self.ignore_index])\n MPA_13 = np.nanmean(MPA[synthia_set_16_to_13])\n return MPA_16, MPA_13\n if out_16_13:\n MPA_16 = np.nanmean(MPA[synthia_set_16])\n MPA_13 = np.nanmean(MPA[synthia_set_13])\n return MPA_16, MPA_13\n MPA = np.nanmean(MPA[:self.ignore_index])\n\n return MPA\n\n def Mean_Intersection_over_Union(self, out_16_13=False):\n MIoU = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n if self.synthia:\n MIoU_16 = np.nanmean(MIoU[:self.ignore_index])\n MIoU_13 = np.nanmean(MIoU[synthia_set_16_to_13])\n return MIoU_16, MIoU_13\n if out_16_13:\n MIoU_16 = np.nanmean(MIoU[synthia_set_16])\n MIoU_13 = np.nanmean(MIoU[synthia_set_13])\n return MIoU_16, MIoU_13\n MIoU = np.nanmean(MIoU[:self.ignore_index])\n\n return MIoU\n\n def Frequency_Weighted_Intersection_over_Union(self, out_16_13=False):\n FWIoU = np.multiply(np.sum(self.confusion_matrix, axis=1), np.diag(self.confusion_matrix))\n FWIoU = FWIoU / (np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n if self.synthia:\n FWIoU_16 = np.sum(i for i in FWIoU if not np.isnan(i)) / np.sum(self.confusion_matrix)\n FWIoU_13 = np.sum(i for i in FWIoU[synthia_set_16_to_13] if not np.isnan(i)) / np.sum(self.confusion_matrix)\n return FWIoU_16, FWIoU_13\n if out_16_13:\n FWIoU_16 = np.sum(i for i in FWIoU[synthia_set_16] if not np.isnan(i)) / np.sum(self.confusion_matrix)\n FWIoU_13 = np.sum(i for i in FWIoU[synthia_set_13] if not np.isnan(i)) / np.sum(self.confusion_matrix)\n return FWIoU_16, FWIoU_13\n FWIoU = sum(i for i in FWIoU if not np.isnan(i)) / np.sum(self.confusion_matrix)\n\n return FWIoU\n\n def Mean_Precision(self, out_16_13=False):\n Precision = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=0)\n if self.synthia:\n Precision_16 = np.nanmean(Precision[:self.ignore_index])\n Precision_13 = np.nanmean(Precision[synthia_set_16_to_13])\n return Precision_16, Precision_13\n if out_16_13:\n Precision_16 = np.nanmean(Precision[synthia_set_16])\n Precision_13 = np.nanmean(Precision[synthia_set_13])\n return Precision_16, Precision_13\n Precision = np.nanmean(Precision[:self.ignore_index])\n return Precision\n \n def Print_Every_class_Eval(self, out_16_13=False):\n MIoU = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n MPA = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)\n Precision = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=0)\n Class_ratio = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)\n Pred_retio = np.sum(self.confusion_matrix, axis=0) / np.sum(self.confusion_matrix)\n print('===>Everyclass:\\t' + 'MPA\\t' + 'MIoU\\t' + 'PC\\t' + 'Ratio\\t' + 'Pred_Retio')\n if out_16_13: 
MIoU = MIoU[synthia_set_16]\n for ind_class in range(len(MIoU)):\n pa = str(round(MPA[ind_class] * 100, 2)) if not np.isnan(MPA[ind_class]) else 'nan'\n iou = str(round(MIoU[ind_class] * 100, 2)) if not np.isnan(MIoU[ind_class]) else 'nan'\n pc = str(round(Precision[ind_class] * 100, 2)) if not np.isnan(Precision[ind_class]) else 'nan'\n cr = str(round(Class_ratio[ind_class] * 100, 2)) if not np.isnan(Class_ratio[ind_class]) else 'nan'\n pr = str(round(Pred_retio[ind_class] * 100, 2)) if not np.isnan(Pred_retio[ind_class]) else 'nan'\n print('===>' + name_classes[ind_class] + ':\\t' + pa + '\\t' + iou + '\\t' + pc + '\\t' + cr + '\\t' + pr)\n\n def Get_class_ratio(self):\n MIoU = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n Class_ratio = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)\n Pred_retio = np.sum(self.confusion_matrix, axis=0) / np.sum(self.confusion_matrix)\n return MIoU, Class_ratio, Pred_retio\n\n # generate confusion matrix\n def __generate_matrix(self, gt_image, pre_image):\n\n mask = (gt_image >= 0) & (gt_image < self.num_class)\n label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]\n count = np.bincount(label, minlength=self.num_class**2)\n confusion_matrix = count.reshape(self.num_class, self.num_class)\n return confusion_matrix\n\n def add_batch(self, gt_image, pre_image):\n # assert the size of two images are same\n assert gt_image.shape == pre_image.shape\n\n self.confusion_matrix += self.__generate_matrix(gt_image, pre_image)\n\n def reset(self):\n self.confusion_matrix = np.zeros((self.num_class,) * 2)"
},
{
"identifier": "get_model",
"path": "utils/train_helper.py",
"snippet": "def get_model(args):\n if args.backbone == \"deeplabv2_multi\":\n model = DeeplabMulti(num_classes=args.num_classes,\n pretrained=args.imagenet_pretrained)\n params = model.optim_parameters(args)\n args.numpy_transform = True\n return model, params"
},
{
"identifier": "City_Dataset",
"path": "datasets/cityscapes_Dataset.py",
"snippet": "class City_Dataset(data.Dataset):\n def __init__(self,\n args,\n data_root_path='/data/zyl/dataset/cityscapes',\n list_path=os.path.abspath('./datasets/city_list'),\n split='train',\n base_size=769,\n crop_size=769,\n training=True,\n class_16=False,\n class_13=False):\n \"\"\"\n\n :param root_path:\n :param dataset:\n :param base_size:\n :param is_trainging:\n :param transforms:\n \"\"\"\n self.args = args\n self.data_path=data_root_path\n self.list_path=list_path\n self.split=split\n self.base_size=base_size\n self.crop_size=crop_size\n\n self.base_size = self.base_size if isinstance(self.base_size, tuple) else (self.base_size, self.base_size)\n self.crop_size = self.crop_size if isinstance(self.crop_size, tuple) else (self.crop_size, self.crop_size)\n self.training = training\n\n self.random_mirror = args.random_mirror\n self.random_crop = args.random_crop\n self.resize = args.resize\n self.gaussian_blur = args.gaussian_blur\n\n item_list_filepath = os.path.join(self.list_path, self.split+\".txt\")\n if not os.path.exists(item_list_filepath):\n raise Warning(\"split must be train/val/trainval\")\n\n self.image_filepath = os.path.join(self.data_path, \"leftImg8bit\")\n\n self.gt_filepath = os.path.join(self.data_path, \"gtFine\")\n\n self.items = [id.strip() for id in open(item_list_filepath)]\n\n ignore_label = -1\n self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,\n 3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,\n 7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,\n 14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,\n 18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,\n 28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}\n # In SYNTHIA-to-Cityscapes case, only consider 16 shared classes\n self.class_16 = class_16\n synthia_set_16 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18]\n self.trainid_to_16id = {id:i for i,id in enumerate(synthia_set_16)}\n # In Cityscapes-to-NTHU case, only consider 13 shared classes\n self.class_13 = class_13\n synthia_set_13 = [0, 1, 2, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18]\n self.trainid_to_13id = {id:i for i,id in enumerate(synthia_set_13)}\n \n print(\"{} num images in Cityscapes {} set have been loaded.\".format(len(self.items), self.split))\n if self.args.numpy_transform:\n print(\"use numpy_transform, instead of tensor transform!\")\n\n def id2trainId(self, label, reverse=False, ignore_label=-1):\n label_copy = ignore_label * np.ones(label.shape, dtype=np.float32)\n for k, v in self.id_to_trainid.items():\n label_copy[label == k] = v\n if self.class_16:\n label_copy_16 = ignore_label * np.ones(label.shape, dtype=np.float32)\n for k, v in self.trainid_to_16id.items():\n label_copy_16[label_copy == k] = v\n label_copy = label_copy_16\n if self.class_13:\n label_copy_13 = ignore_label * np.ones(label.shape, dtype=np.float32)\n for k, v in self.trainid_to_13id.items():\n label_copy_13[label_copy == k] = v\n label_copy = label_copy_13\n return label_copy\n\n def __getitem__(self, item):\n id = self.items[item]\n filename = id.split(\"train_\")[-1].split(\"val_\")[-1].split(\"test_\")[-1]\n image_filepath = os.path.join(self.image_filepath, id.split(\"_\")[0], id.split(\"_\")[1])\n image_filename = filename + \"_leftImg8bit.png\"\n image_path = os.path.join(image_filepath, image_filename)\n image = Image.open(image_path).convert(\"RGB\")\n\n gt_filepath = os.path.join(self.gt_filepath, 
id.split(\"_\")[0], id.split(\"_\")[1])\n gt_filename = filename + \"_gtFine_labelIds.png\"\n gt_image_path = os.path.join(gt_filepath, gt_filename)\n gt_image = Image.open(gt_image_path)\n\n if (self.split == \"train\" or self.split == \"trainval\") and self.training:\n image, gt_image = self._train_sync_transform(image, gt_image)\n else:\n image, gt_image = self._val_sync_transform(image, gt_image)\n\n return image, gt_image, item\n\n def _train_sync_transform(self, img, mask):\n '''\n :param image: PIL input image\n :param gt_image: PIL input gt_image\n :return:\n '''\n if self.random_mirror:\n # random mirror\n if random.random() < 0.5:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if mask: mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n crop_w, crop_h = self.crop_size\n\n if self.random_crop:\n # random scale\n base_w , base_h = self.base_size\n w, h = img.size\n assert w >= h\n if (base_w / w) > (base_h / h):\n base_size = base_w \n short_size = random.randint(int(base_size * 0.5), int(base_size * 2.0))\n ow = short_size\n oh = int(1.0 * h * ow / w)\n else:\n base_size = base_h\n short_size = random.randint(int(base_size * 0.5), int(base_size * 2.0))\n oh = short_size\n ow = int(1.0 * w * oh / h)\n\n img = img.resize((ow, oh), Image.BICUBIC)\n if mask: mask = mask.resize((ow, oh), Image.NEAREST)\n # pad crop\n if ow < crop_w or oh < crop_h:\n padh = crop_h - oh if oh < crop_h else 0\n padw = crop_w - ow if ow < crop_w else 0\n img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)\n if mask: mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)\n # random crop crop_size\n w, h = img.size\n x1 = random.randint(0, w - crop_w)\n y1 = random.randint(0, h - crop_h)\n img = img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n if mask: mask = mask.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n elif self.resize:\n img = img.resize(self.crop_size, Image.BICUBIC)\n if mask: mask = mask.resize(self.crop_size, Image.NEAREST)\n \n if self.gaussian_blur:\n # gaussian blur as in PSP\n if random.random() < 0.5:\n img = img.filter(ImageFilter.GaussianBlur(\n radius=random.random()))\n # final transform\n if mask: \n img, mask = self._img_transform(img), self._mask_transform(mask)\n return img, mask\n else:\n img = self._img_transform(img)\n return img\n\n def _val_sync_transform(self, img, mask):\n if self.random_crop:\n crop_w, crop_h = self.crop_size\n w, h = img.size\n if crop_w / w < crop_h / h:\n oh = crop_h\n ow = int(1.0 * w * oh / h)\n else:\n ow = crop_w\n oh = int(1.0 * h * ow / w)\n img = img.resize((ow, oh), Image.BICUBIC)\n mask = mask.resize((ow, oh), Image.NEAREST)\n # center crop\n w, h = img.size\n x1 = int(round((w - crop_w) / 2.))\n y1 = int(round((h - crop_h) / 2.))\n img = img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n mask = mask.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n elif self.resize:\n img = img.resize(self.crop_size, Image.BICUBIC)\n mask = mask.resize(self.crop_size, Image.NEAREST)\n\n # final transform\n img, mask = self._img_transform(img), self._mask_transform(mask)\n return img, mask\n\n def _img_transform(self, image):\n if self.args.numpy_transform:\n image = np.asarray(image, np.float32)\n image = image[:, :, ::-1] # change to BGR\n image -= IMG_MEAN\n image = image.transpose((2, 0, 1)).copy() # (C x H x W)\n new_image = torch.from_numpy(image)\n else:\n image_transforms = ttransforms.Compose([\n ttransforms.ToTensor(),\n ttransforms.Normalize([.485, .456, .406], [.229, .224, .225]),\n ])\n new_image = image_transforms(image)\n return new_image\n\n def 
_mask_transform(self, gt_image):\n target = np.asarray(gt_image, np.float32)\n target = self.id2trainId(target).copy()\n target = torch.from_numpy(target)\n\n return target\n\n def __len__(self):\n return len(self.items)"
},
{
"identifier": "City_DataLoader",
"path": "datasets/cityscapes_Dataset.py",
"snippet": "class City_DataLoader():\n def __init__(self, args, training=True):\n\n self.args = args\n\n data_set = City_Dataset(args, \n data_root_path='/mnt/Xsky/zyl/dataset/cityscapes',\n list_path='./datasets/city_list',\n split=args.split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=training,\n class_16=args.class_16,\n class_13=args.class_13)\n\n if (self.args.split == \"train\" or self.args.split == \"trainval\") and training:\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=True,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n else:\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n\n val_set = City_Dataset(args, \n data_root_path='./datasets/Cityscapes',\n list_path='./datasets/city_list',\n split='val',\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=False,\n class_16=args.class_16,\n class_13=args.class_13)\n self.val_loader = data.DataLoader(val_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size\n\n self.num_iterations = (len(data_set) + self.args.batch_size) // self.args.batch_size"
},
{
"identifier": "inv_preprocess",
"path": "datasets/cityscapes_Dataset.py",
"snippet": "def inv_preprocess(imgs, num_images=1, img_mean=IMG_MEAN, numpy_transform=False):\n \"\"\"Inverse preprocessing of the batch of images.\n \n Args:\n imgs: batch of input images.\n num_images: number of images to apply the inverse transformations on.\n img_mean: vector of mean colour values.\n numpy_transform: whether change RGB to BGR during img_transform.\n \n Returns:\n The batch of the size num_images with the same spatial dimensions as the input.\n \"\"\"\n if numpy_transform:\n imgs = flip(imgs, 1)\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min + 1e-5)\n norm_ip(imgs, float(imgs.min()), float(imgs.max()))\n return imgs"
},
{
"identifier": "decode_labels",
"path": "datasets/cityscapes_Dataset.py",
"snippet": "def decode_labels(mask, num_images=1, num_classes=NUM_CLASSES):\n \"\"\"Decode batch of segmentation masks.\n \n Args:\n mask: result of inference after taking argmax.\n num_images: number of images to decode from the batch.\n num_classes: number of classes to predict.\n \n Returns:\n A batch with num_images RGB images of the same size as the input. \n \"\"\"\n if isinstance(mask, torch.Tensor):\n mask = mask.data.cpu().numpy()\n n, h, w = mask.shape\n if n < num_images:\n num_images = n\n outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n for i in range(num_images):\n img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))\n pixels = img.load()\n for j_, j in enumerate(mask[i, :, :]):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_,j_] = label_colours[k]\n outputs[i] = np.array(img)\n return torch.from_numpy(outputs.transpose([0, 3, 1, 2]).astype('float32')).div_(255.0)"
},
{
"identifier": "GTA5_DataLoader",
"path": "datasets/gta5_Dataset.py",
"snippet": "class GTA5_DataLoader():\n def __init__(self, args, training=True):\n\n self.args = args\n\n data_set = GTA5_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=args.split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=training)\n\n if self.args.split == \"train\" or self.args.split == \"trainval\" or self.args.split ==\"all\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=True,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n elif self.args.split ==\"val\" or self.args.split == \"test\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n else:\n raise Warning(\"split must be train/val/trainavl/test/all\")\n\n val_split = 'val' if self.args.split == \"train\" else 'test'\n val_set = GTA5_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=val_split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=False)\n self.val_loader = data.DataLoader(val_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size\n\n self.num_iterations = (len(data_set) + self.args.batch_size) // self.args.batch_size"
},
{
"identifier": "SYNTHIA_DataLoader",
"path": "datasets/synthia_Dataset.py",
"snippet": "class SYNTHIA_DataLoader():\n def __init__(self, args, training=True):\n\n self.args = args\n\n data_set = SYNTHIA_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=args.split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=training,\n class_16=args.class_16)\n\n if self.args.split == \"train\" or self.args.split == \"trainval\" or self.args.split ==\"all\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=True,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n elif self.args.split ==\"val\" or self.args.split == \"test\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n else:\n raise Warning(\"split must be train/val/trainavl/test/all\")\n\n val_split = 'val' if self.args.split == \"train\" else 'test'\n val_set = SYNTHIA_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=val_split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=False,\n class_16=args.class_16)\n self.val_loader = data.DataLoader(val_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size\n\n self.num_iterations = (len(data_set) + self.args.batch_size) // self.args.batch_size"
}
] | import os
import random
import logging
import argparse
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.functional as F
import numpy as np
import sys
import shutil
from tqdm import tqdm
from math import ceil
from distutils.version import LooseVersion
from tensorboardX import SummaryWriter
from torchvision.utils import make_grid
from utils.eval import Eval
from utils.train_helper import get_model
from datasets.cityscapes_Dataset import City_Dataset, City_DataLoader, inv_preprocess, decode_labels
from datasets.gta5_Dataset import GTA5_DataLoader
from datasets.synthia_Dataset import SYNTHIA_DataLoader | 8,576 |
# validate
PA, MPA, MIoU, FWIoU = self.validate()
self.writer.add_scalar('PA', PA, self.current_epoch)
self.writer.add_scalar('MPA', MPA, self.current_epoch)
self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
self.current_MIoU = MIoU
is_best = MIoU > self.best_MIou
if is_best:
self.best_MIou = MIoU
self.best_iter = self.current_iter
self.logger.info("=>saving a new best checkpoint...")
self.save_checkpoint(self.train_id+'best.pth')
else:
self.logger.info("=> The MIoU of val does't improve.")
self.logger.info("=> The best MIoU of val is {} at {}".format(self.best_MIou, self.best_iter))
self.current_epoch += 1
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou': self.current_MIoU
}
self.logger.info("=>best_MIou {} at {}".format(self.best_MIou, self.best_iter))
self.logger.info("=>saving the final checkpoint to " + os.path.join(self.args.checkpoint_dir, self.train_id+'final.pth'))
self.save_checkpoint(self.train_id+'final.pth')
def train_one_epoch(self):
tqdm_epoch = tqdm(self.dataloader.data_loader, total=self.dataloader.num_iterations,
desc="Train Epoch-{}-total-{}".format(self.current_epoch+1, self.epoch_num))
self.logger.info("Training one epoch...")
self.Eval.reset()
train_loss = []
loss_seg_value_2 = 0
iter_num = self.dataloader.num_iterations
if self.args.freeze_bn:
self.model.eval()
self.logger.info("freeze bacth normalization successfully!")
else:
self.model.train()
# Initialize your average meters
batch_idx = 0
for x, y, _ in tqdm_epoch:
self.poly_lr_scheduler(
optimizer=self.optimizer,
init_lr=self.args.lr,
iter=self.current_iter,
max_iter=self.args.iter_max,
power=self.args.poly_power,
)
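            # Note (added for clarity, not in the original file): poly_lr_scheduler is
            # defined elsewhere in this class; it presumably applies the standard
            # polynomial decay  lr = init_lr * (1 - iter / max_iter) ** power.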
if self.args.iter_stop is not None and self.current_iter >= self.args.iter_stop:
self.logger.info("iteration arrive {}(early stop)/{}(total step)!".format(self.args.iter_stop, self.args.iter_max))
break
if self.current_iter >= self.args.iter_max:
self.logger.info("iteration arrive {}!".format(self.args.iter_max))
break
self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]["lr"], self.current_iter)
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
y = torch.squeeze(y, 1)
self.optimizer.zero_grad()
# model
pred = self.model(x)
if isinstance(pred, tuple):
pred_2 = pred[1]
pred = pred[0]
pred = F.interpolate(pred, size=x.size()[2:], mode='bilinear', align_corners=True)
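            # Note (added for clarity, not in the original file): the logits are
            # upsampled to the input resolution so the cross-entropy below is computed
            # against the full-resolution label map.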
# loss
cur_loss = self.loss(pred, y)
if self.args.multi:
loss_2 = self.args.lambda_seg * self.loss(pred_2, y)
cur_loss += loss_2
loss_seg_value_2 += loss_2.cpu().item() / iter_num
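            # Note (added for clarity, not in the original file): with args.multi the
            # backbone ("deeplabv2_multi") returns a second prediction, presumably an
            # auxiliary multi-level head, whose loss is weighted by args.lambda_seg.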
# optimizer
cur_loss.backward()
self.optimizer.step()
train_loss.append(cur_loss.item())
if batch_idx % 1000 == 0:
if self.args.multi:
self.logger.info("The train loss of epoch{}-batch-{}:{};{}".format(self.current_epoch,
batch_idx, cur_loss.item(), loss_2.item()))
else:
self.logger.info("The train loss of epoch{}-batch-{}:{}".format(self.current_epoch,
batch_idx, cur_loss.item()))
batch_idx += 1
self.current_iter += 1
if np.isnan(float(cur_loss.item())):
raise ValueError('Loss is nan during training...')
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
if batch_idx==self.dataloader.num_iterations:
break
self.log_one_train_epoch(x, label, argpred, train_loss)
tqdm_epoch.close()
def log_one_train_epoch(self, x, label, argpred, train_loss):
#show train image on tensorboard
|
sys.path.append(os.path.abspath('tools'))
datasets_path={
'cityscapes': {'data_root_path': '/mnt/Xsky/zyl/dataset/dataset/Cityscapes', 'list_path': './datasets/city_list',
'image_path':'/mnt/Xsky/zyl/dataset/Cityscapes/leftImg8bit',
'gt_path': './datasets/Cityscapes/gtFine'},
'gta5': {'data_root_path': '/mnt/Xsky/zyl/dataset/GTA5', 'list_path': './datasets/gta5_list',
'image_path':'/mnt/Xsky/zyl/dataset/GTA5/images',
'gt_path': './datasets/GTA5/labels'},
'synthia': {'data_root_path': '/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES', 'list_path': './datasets/synthia_list',
'image_path':'/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES/RGB',
'gt_path': './datasets/SYNTHIA/GT/LABELS'},
'NTHU': {'data_root_path': './datasets/NTHU_Datasets', 'list_path': './datasets/NTHU_list'}
}
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
ITER_MAX = 5000
class Trainer():
def __init__(self, args, cuda=None, train_id="None", logger=None):
self.args = args
os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
self.cuda = cuda and torch.cuda.is_available()
self.device = torch.device('cuda' if self.cuda else 'cpu')
self.train_id = train_id
self.logger = logger
self.current_MIoU = 0
self.best_MIou = 0
self.best_source_MIou = 0
self.current_epoch = 0
self.current_iter = 0
self.second_best_MIou = 0
# set TensorboardX
self.writer = SummaryWriter(self.args.checkpoint_dir)
# Metric definition
self.Eval = Eval(self.args.num_classes)
# loss definition
self.loss = nn.CrossEntropyLoss(weight=None, ignore_index= -1)
self.loss.to(self.device)
# model
self.model, params = get_model(self.args)
self.model = nn.DataParallel(self.model, device_ids=[0])
self.model.to(self.device)
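# Build the optimizer (SGD with momentum or Adam) from the command-line arguments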
if self.args.optim == "SGD":
self.optimizer = torch.optim.SGD(
params=params,
momentum=self.args.momentum,
weight_decay=self.args.weight_decay
)
elif self.args.optim == "Adam":
self.optimizer = torch.optim.Adam(params, betas=(0.9, 0.99), weight_decay=self.args.weight_decay)
# dataloader
if self.args.dataset=="cityscapes":
self.dataloader = City_DataLoader(self.args)
elif self.args.dataset=="gta5":
self.dataloader = GTA5_DataLoader(self.args)
else:
self.dataloader = SYNTHIA_DataLoader(self.args)
self.dataloader.num_iterations = min(self.dataloader.num_iterations, ITER_MAX)
print(self.args.iter_max, self.dataloader.num_iterations)
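# Derive the number of epochs from the iteration budget (iter_max, or iter_stop if set) and the iterations per epoch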
self.epoch_num = ceil(self.args.iter_max / self.dataloader.num_iterations) if self.args.iter_stop is None else \
ceil(self.args.iter_stop / self.dataloader.num_iterations)
def main(self):
# display args details
self.logger.info("Global configuration as follows:")
for key, val in vars(self.args).items():
self.logger.info("{:16} {}".format(key, val))
# choose cuda
if self.cuda:
current_device = torch.cuda.current_device()
self.logger.info("This model will run on {}".format(torch.cuda.get_device_name(current_device)))
else:
self.logger.info("This model will run on CPU")
# load pretrained checkpoint
if self.args.pretrained_ckpt_file is not None:
if os.path.isdir(self.args.pretrained_ckpt_file):
self.args.pretrained_ckpt_file = os.path.join(self.args.checkpoint_dir, self.train_id + 'best.pth')
self.load_checkpoint(self.args.pretrained_ckpt_file)
if self.args.continue_training:
self.load_checkpoint(os.path.join(self.args.checkpoint_dir, self.train_id + 'best.pth'))
self.best_iter = self.current_iter
self.best_source_iter = self.current_iter
else:
self.current_epoch = 0
# train
self.train()
self.writer.close()
def train(self):
# self.validate() # check image summary
for epoch in tqdm(range(self.current_epoch, self.epoch_num),
desc="Total {} epochs".format(self.epoch_num)):
self.train_one_epoch()
# validate
PA, MPA, MIoU, FWIoU = self.validate()
self.writer.add_scalar('PA', PA, self.current_epoch)
self.writer.add_scalar('MPA', MPA, self.current_epoch)
self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
self.current_MIoU = MIoU
is_best = MIoU > self.best_MIou
if is_best:
self.best_MIou = MIoU
self.best_iter = self.current_iter
self.logger.info("=>saving a new best checkpoint...")
self.save_checkpoint(self.train_id+'best.pth')
else:
self.logger.info("=> The MIoU of val does't improve.")
self.logger.info("=> The best MIoU of val is {} at {}".format(self.best_MIou, self.best_iter))
self.current_epoch += 1
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou': self.current_MIoU
}
self.logger.info("=>best_MIou {} at {}".format(self.best_MIou, self.best_iter))
self.logger.info("=>saving the final checkpoint to " + os.path.join(self.args.checkpoint_dir, self.train_id+'final.pth'))
self.save_checkpoint(self.train_id+'final.pth')
def train_one_epoch(self):
tqdm_epoch = tqdm(self.dataloader.data_loader, total=self.dataloader.num_iterations,
desc="Train Epoch-{}-total-{}".format(self.current_epoch+1, self.epoch_num))
self.logger.info("Training one epoch...")
self.Eval.reset()
train_loss = []
loss_seg_value_2 = 0
iter_num = self.dataloader.num_iterations
if self.args.freeze_bn:
self.model.eval()
self.logger.info("freeze bacth normalization successfully!")
else:
self.model.train()
# Initialize your average meters
batch_idx = 0
for x, y, _ in tqdm_epoch:
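# Apply the polynomial learning-rate decay schedule before each optimization step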
self.poly_lr_scheduler(
optimizer=self.optimizer,
init_lr=self.args.lr,
iter=self.current_iter,
max_iter=self.args.iter_max,
power=self.args.poly_power,
)
if self.args.iter_stop is not None and self.current_iter >= self.args.iter_stop:
self.logger.info("iteration arrive {}(early stop)/{}(total step)!".format(self.args.iter_stop, self.args.iter_max))
break
if self.current_iter >= self.args.iter_max:
self.logger.info("iteration arrive {}!".format(self.args.iter_max))
break
self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]["lr"], self.current_iter)
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
y = torch.squeeze(y, 1)
self.optimizer.zero_grad()
# model
pred = self.model(x)
if isinstance(pred, tuple):
pred_2 = pred[1]
pred = pred[0]
pred = F.interpolate(pred, size=x.size()[2:], mode='bilinear', align_corners=True)
# loss
cur_loss = self.loss(pred, y)
if self.args.multi:
loss_2 = self.args.lambda_seg * self.loss(pred_2, y)
cur_loss += loss_2
loss_seg_value_2 += loss_2.cpu().item() / iter_num
# optimizer
cur_loss.backward()
self.optimizer.step()
train_loss.append(cur_loss.item())
if batch_idx % 1000 == 0:
if self.args.multi:
self.logger.info("The train loss of epoch{}-batch-{}:{};{}".format(self.current_epoch,
batch_idx, cur_loss.item(), loss_2.item()))
else:
self.logger.info("The train loss of epoch{}-batch-{}:{}".format(self.current_epoch,
batch_idx, cur_loss.item()))
batch_idx += 1
self.current_iter += 1
if np.isnan(float(cur_loss.item())):
raise ValueError('Loss is nan during training...')
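# Convert network outputs to hard class labels and accumulate them into the evaluator for epoch-level metrics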
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
if batch_idx==self.dataloader.num_iterations:
break
self.log_one_train_epoch(x, label, argpred, train_loss)
tqdm_epoch.close()
def log_one_train_epoch(self, x, label, argpred, train_loss):
#show train image on tensorboard | images_inv = inv_preprocess(x.clone().cpu(), self.args.show_num_images, numpy_transform=self.args.numpy_transform) | 4 | 2023-11-14 02:01:11+00:00 | 12k |
raphaelreme/koft | src/experiments/track.py | [
{
"identifier": "FakeDetector",
"path": "src/detector.py",
"snippet": "class FakeDetector(byotrack.Detector): # TODO: include weight\n def __init__(self, mu: torch.Tensor, noise=1.0, fpr=0.1, fnr=0.2, generate_outside_particles=True):\n self.noise = noise\n self.fpr = fpr\n self.fnr = fnr\n self.mu = mu\n self.n_particles = mu.shape[1]\n self.generate_outside_particles = generate_outside_particles\n\n def run(self, video: Iterable[np.ndarray]) -> Collection[byotrack.Detections]:\n detections_sequence = []\n\n for k, frame in enumerate(tqdm.tqdm(video)):\n frame = frame[..., 0] # Drop channel\n shape = torch.tensor(frame.shape)\n\n detected = torch.rand(self.n_particles) >= self.fnr # Miss some particles (randomly)\n positions = self.mu[k, detected] + torch.randn((detected.sum(), 2)) * self.noise\n positions = positions[(positions > 0).all(dim=-1)]\n positions = positions[(positions < shape - 1).all(dim=-1)]\n\n # Create fake detections\n # 1- Quickly compute the background mask\n mask = torch.tensor(cv2.GaussianBlur(frame, (33, 33), 15) > 0.2)\n mask_proportion = mask.sum().item() / mask.numel()\n\n # 2- Scale fpr by the mask proportion\n n_fake = int(len(positions) * (self.fpr + torch.randn(1).item() * self.fpr / 10) / mask_proportion)\n false_alarm = torch.rand(n_fake, 2) * (shape - 1)\n\n if not self.generate_outside_particles: # Filter fake detections outside the mask\n false_alarm = false_alarm[mask[false_alarm.long()[:, 0], false_alarm.long()[:, 1]]]\n\n positions = torch.cat((positions, false_alarm))\n\n # bbox = torch.cat((positions - 1, torch.zeros_like(positions) + 3), dim=-1)\n detections_sequence.append(\n byotrack.Detections(\n {\n \"position\": positions,\n # \"bbox\": bbox.round().to(torch.int32),\n \"shape\": shape,\n },\n frame_id=k,\n )\n )\n\n return detections_sequence"
},
{
"identifier": "DetectionMetric",
"path": "src/metrics/detections.py",
"snippet": "class DetectionMetric:\n \"\"\"\"\"\"\n\n def __init__(self, dist_thresh: float, greedy=True) -> None:\n self.dist_thresh = dist_thresh\n self.greedy = greedy\n self.lap_solver = pylapy.LapSolver()\n\n def compute_at(\n self,\n detections: byotrack.Detections,\n true_position: torch.Tensor,\n true_weight: Optional[torch.Tensor] = None,\n prob_thresh=0.0,\n weight_thresh=0.0,\n ) -> Dict[str, float]:\n \"\"\"Compute the precision, recall and f1 at a given probability and weight thresholds\"\"\"\n if true_weight is not None:\n true_position = true_position[true_weight > weight_thresh]\n\n predicted_position = detections.position[detections.confidence > prob_thresh]\n\n dist = torch.cdist(predicted_position, true_position)\n\n if self.greedy:\n dist[dist > self.dist_thresh] = torch.inf\n tp = self.lap_solver.solve(dist.numpy()).shape[0]\n else:\n tp = self.lap_solver.solve(dist.numpy(), self.dist_thresh).shape[0]\n\n n_pred = len(predicted_position)\n n_true = len(true_position)\n precision = tp / n_pred if n_pred else 1.0\n recall = tp / n_true if n_true else 1.0\n f1 = 2 * tp / (n_true + n_pred) if n_pred + n_true else 1.0\n\n return {\n \"precision\": precision,\n \"recall\": recall,\n \"f1\": f1,\n \"n_pred\": n_pred,\n \"n_true\": n_true,\n \"tp\": tp,\n }\n\n def average_precision_weight(\n self,\n detections: byotrack.Detections,\n true_position: torch.Tensor,\n true_weight: Optional[torch.Tensor] = None,\n prob_thresh=0.0,\n ) -> float:\n recalls = []\n precisions = []\n\n for weight_thresh in torch.linspace(0, 2.0, 201):\n metrics = self.compute_at(detections, true_position, true_weight, prob_thresh, weight_thresh.item())\n recalls.append(metrics[\"recall\"])\n precisions.append(metrics[\"precision\"])\n\n return compute_ap(recalls, precisions)\n\n def average_precision_prob(\n self,\n detections: byotrack.Detections,\n true_position: torch.Tensor,\n true_weight: Optional[torch.Tensor] = None,\n weight_thresh=0.0,\n ) -> float:\n recalls = []\n precisions = []\n\n for prob_thresh in torch.linspace(1.0, 0.0, 101):\n metrics = self.compute_at(detections, true_position, true_weight, prob_thresh.item(), weight_thresh)\n recalls.append(metrics[\"recall\"])\n precisions.append(metrics[\"precision\"])\n\n return compute_ap(recalls, precisions)"
},
{
"identifier": "compute_tracking_metrics",
"path": "src/metrics/tracking.py",
"snippet": "def compute_tracking_metrics(\n tracks: Collection[byotrack.Track], ground_truth: Dict[str, torch.Tensor]\n) -> Dict[str, float]:\n \"\"\"Compute [email protected] (consider that gt matches with pred if dist < 1.5 pixels)\n\n Also returns localization errors when matching at 4.5 pixels.\n\n We choose not to aggregate the HOTA performances at different thresholds, but rather choose one,\n and use LocA to measure localization errors. (Converted in pixels)\n\n Keys:\n HOTA: HOTA at 1.5 pixels\n DetA: Jacquard of detections\n DetPr: Precision of detections\n DetRe: Recall of detections\n AssA: Jacquard of associations\n AssPr: Precision of associations\n AssRe: Recall of associations\n Loca: Localization errors (but at 4.5 pixels)\n \"\"\"\n gt_data = simulator_to_eval(ground_truth[\"mu\"], ground_truth[\"weight\"])\n track_data = tracks_to_eval(tracks)\n data = {**gt_data, **track_data}\n add_similarity(data)\n\n metric = trackeval.metrics.hota.HOTA()\n metrics = metric.eval_sequence(data)\n\n # -6 => 0.7 similarity => 1 - 1.5 / 5\n return {\n \"HOTA\": float(metrics[\"HOTA\"][-6]),\n \"DetA\": float(metrics[\"DetA\"][-6]),\n \"DetPr\": float(metrics[\"DetPr\"][-6]),\n \"DetRe\": float(metrics[\"DetRe\"][-6]),\n \"AssA\": float(metrics[\"AssA\"][-6]),\n \"AssPr\": float(metrics[\"AssPr\"][-6]),\n \"AssRe\": float(metrics[\"AssRe\"][-6]),\n \"Loca\": 5 - 5 * float(metrics[\"LocA\"][1]), # Mean of pixel errors for TP associations\n }"
},
{
"identifier": "constant_kalman_filter",
"path": "src/skt.py",
"snippet": "def constant_kalman_filter(measurement_std: torch.Tensor, process_std: torch.Tensor, dim=2, order=1) -> KalmanFilter:\n \"\"\"Create a constant Velocity/Acceleration/Jerk Kalman Filter\n\n Create a kalman filter with a state containing the positions on each dimension (x, y, z, ...)\n with their derivatives up to `order`. The order-th derivatives are supposed constant.\n\n Let x be the positions for each dim and x^i the i-th derivatives of these positions\n Prediction follows:\n x^i_{t+1} = x^i_t + x^{i+1}_t, for i < order\n x^order_{t+1} = x^order_t\n\n Args:\n measurement_std (torch.Tensor): Std of the measurements\n 99.7% of measurements should fall within 3 std of the true position\n Shape: Broadcastable to dim, dtype: float64\n process_std (torch.Tensor): Process noise, a typical value is maximum diff between two consecutive\n order-th derivative. (Eg: for constant velocity -> Maximum acceleration between two frames)\n Shape: Broadcastable to dim, dtype: float64\n dim (int): Dimension of the motion (1d, 2d, 3d, ...)\n Default: 2\n order (int): Order of the filer (The order-th derivatives are constants)\n Default: 1 (Constant velocity)\n\n \"\"\"\n measurement_std = torch.broadcast_to(measurement_std, (dim,))\n process_std = torch.broadcast_to(process_std, (dim,))\n\n state_dim = (order + 1) * dim\n\n # Measurement model\n # We only measure the positions\n # Noise is independent and can have a different value in each direction\n measurement_matrix = torch.eye(dim, state_dim)\n measurement_noise = torch.eye(dim) * measurement_std**2\n\n # Process\n # Constant model\n # Noise in velocity estimation (which induce a noise in position estimation)\n process_matrix = torch.eye(state_dim) + torch.tensor(np.eye(state_dim, k=dim)).to(torch.float32)\n process_noise = torch.tensor(\n filterpy.common.Q_discrete_white_noise(order + 1, block_size=dim, order_by_dim=False)\n ).to(torch.float32) * torch.cat([process_std**2] * (order + 1))\n\n return KalmanFilter(process_matrix, measurement_matrix, process_noise, measurement_noise)"
},
{
"identifier": "Dist",
"path": "src/skt.py",
"snippet": "class Dist(enum.Enum):\n MAHALANOBIS = \"mahalanobis\"\n EUCLIDIAN = \"euclidian\"\n LIKELIHOOD = \"likelihood\""
},
{
"identifier": "Method",
"path": "src/skt.py",
"snippet": "class Method(enum.Enum):\n \"\"\"Matching methods\n\n Opt: GDM with Jonker-volgenant algorithm (Linear assignement solver)\n Can be smooth thresholding or hard\n Greedy: Takes the best matches iteratively\n \"\"\"\n\n OPT_SMOOTH = \"opt_smooth\"\n OPT_HARD = \"opt_hard\"\n GREEDY = \"greedy\""
},
{
"identifier": "MatchingConfig",
"path": "src/skt.py",
"snippet": "class MatchingConfig:\n thresh: float\n dist: Dist = Dist.MAHALANOBIS\n method: Method = Method.OPT_SMOOTH"
},
{
"identifier": "SimpleKalmanTracker",
"path": "src/skt.py",
"snippet": "class SimpleKalmanTracker(byotrack.Linker):\n \"\"\"Simple Kalman tracker (SKT)\"\"\"\n\n def __init__(self, kalman_filter: KalmanFilter, match_cfg: MatchingConfig) -> None:\n super().__init__()\n self.kalman_filter = kalman_filter\n self.tracks: List[PartialTrack] = []\n self.active_tracks: List[PartialTrack] = []\n self.state = GaussianState( # Current state of active tracks\n torch.zeros((0, self.kalman_filter.state_dim, 1)),\n torch.zeros((0, self.kalman_filter.state_dim, self.kalman_filter.state_dim)),\n )\n\n self.match_cfg = match_cfg\n\n def run(\n self, video: Iterable[np.ndarray], detections_sequence: Collection[byotrack.Detections]\n ) -> Collection[byotrack.Track]:\n # Reset tracks and states\n self.tracks = []\n self.active_tracks = []\n self.state = GaussianState(\n torch.zeros((0, self.kalman_filter.state_dim, 1)),\n torch.zeros((0, self.kalman_filter.state_dim, self.kalman_filter.state_dim)),\n ) # The first iteration will predict and associate with 0 tracks, leading to no association\n # Thus creating tracks for all detections in the first frame\n\n for detections in tqdm.tqdm(detections_sequence):\n self.update(detections)\n\n tracks = []\n for track in self.tracks + self.active_tracks:\n if track.track_state in (track.TrackState.DELETED, track.TrackState.INITIATED):\n continue # Ignore unconfirmed tracks\n tracks.append(\n byotrack.Track(\n track.start,\n track.points,\n track.track_id,\n )\n )\n return tracks\n\n def match(self, projection: GaussianState, measures: torch.Tensor) -> torch.Tensor:\n \"\"\"Match projection with measures using positions\n\n If velocity measure (KOFT) is available, we do not use it here (Even if it could be better)\n\n Args:\n projection (GaussianState): Projection for all tracks. Only supports 2D (dim_z = 2 or 4\n if velocities are included). Mean: (n, dim_z, 1), Cov: (n, dim_z, dim_z)\n measures (torch.Tensor): Measures to match with tracks. Only supports 2D. Measures can\n include velocities but it won't be used for matching. (Though could be an easy upgrade)\n Shape: (m, 2, 1) or (m, 4 ,1), dtype: float32\n\n Returns:\n torch.Tensor: Links between tracks and measures\n Shape: (L, 2), dtype: int32\n \"\"\"\n dist: torch.Tensor\n thresh: float\n\n if self.match_cfg.dist in (Dist.MAHALANOBIS, Dist.LIKELIHOOD):\n if projection.precision is None:\n # Register in case someone needs it afterwards (like kf.update)\n projection.precision = projection.covariance.inverse().contiguous()\n\n precision = projection.precision[:, None, :2, :2] # Handle 4d projection with speed. 
(n, 1, 2, 2)\n # We noticed that it is more efficient to use inv(cov)[:2, :2] rather than inv(cov[:2, :2])...\n # Need more investigatation but: This solution is equivalent to consider than the speed prediction\n # is perfect and using covariance between speed and position to quantify the errors on positions\n # precision != torch.linalg.inv(projection.covariance[:, None, :2, :2])\n\n diff = projection.mean[:, None, :2] - measures[None, :, :2] # Shape: (n, m, 2, 1)\n dist = diff.mT @ precision @ diff # Shape: (n, m, 1, 1)\n if self.match_cfg.dist == Dist.MAHALANOBIS:\n dist = dist[..., 0, 0]\n thresh = self.match_cfg.thresh**2 # No need to take the sqrt, let's compare to the sq thresh\n else: # likelihood\n log_det = torch.log(torch.det(projection.covariance))[:, None] # Shape (N, 1)\n # Dist = - log likelihood\n dist = 0.5 * (diff.shape[2] * torch.log(2 * torch.tensor(torch.pi)) + log_det + dist[..., 0, 0])\n thresh = -torch.log(torch.tensor(self.match_cfg.thresh)).item()\n else: # Euclidian\n dist = torch.cdist(projection.mean[:, :2, 0], measures[:, :2, 0])\n thresh = self.match_cfg.thresh\n\n if self.match_cfg.method == Method.GREEDY:\n links = greedy_assignment_solver(dist.numpy(), thresh)\n else:\n dist[dist > thresh] = torch.inf\n links = pylapy.LapSolver().solve(\n dist.numpy(),\n float(\"inf\") if self.match_cfg.method == Method.OPT_HARD else thresh,\n )\n\n return torch.tensor(links.astype(np.int32))\n\n def update(self, detections: byotrack.Detections):\n prior = self.kalman_filter.predict(self.state)\n projection = self.kalman_filter.project(prior)\n positions = detections.position[..., None].clone() # Shape m, d, 1\n\n # Association\n links = self.match(projection, positions)\n\n # Update linked kalman filter\n posterior = self.kalman_filter.update(\n GaussianState(prior.mean[links[:, 0]], prior.covariance[links[:, 0]]),\n positions[links[:, 1]],\n GaussianState(\n projection.mean[links[:, 0]],\n projection.covariance[links[:, 0]],\n projection.precision[links[:, 0]] if projection.precision is not None else None,\n ),\n )\n\n # Take prior by default if non-linked\n prior.mean[links[:, 0]] = posterior.mean\n prior.covariance[links[:, 0]] = posterior.covariance\n posterior = prior\n\n self._handle_tracks(posterior, positions, links, detections.frame_id)\n\n def _handle_tracks(\n self, posterior: GaussianState, measures: torch.Tensor, links: torch.Tensor, frame_id: int\n ) -> None:\n \"\"\"Handle tracks to save track data, start new tracks and delete lost ones\n\n Args:\n posterior (GaussianState): Posterior for all active tracks.\n Mean: (n, dim_x, 1), Cov: (n, dim_x, dim_x)\n measures (torch.Tensor): Measures (Only supports 2D). 
Measures can include velocities (KOFT)\n Shape: (m, 2, 1) or (m, 4 ,1), dtype: float32\n links (torch.Tensor): Links between tracks and measures\n Shape: (L, 2), dtype: int32\n frame_id (int): Current frame id\n\n \"\"\"\n\n # Save both state and measurement in partial tracks.\n i_to_j = torch.full((len(self.active_tracks),), -1, dtype=torch.int32)\n i_to_j[links[:, 0]] = links[:, 1]\n active_mask = torch.full((len(self.active_tracks),), False)\n still_active = []\n for i, track in enumerate(self.active_tracks):\n j = i_to_j[i]\n if j == -1:\n track.update(posterior.mean[i], posterior.covariance[i], None)\n else:\n track.update(posterior.mean[i], posterior.covariance[i], measures[j])\n\n if track.is_active():\n still_active.append(track)\n active_mask[i] = True\n else:\n self.tracks.append(track)\n\n # Restrict posterior states to active tracks\n posterior = GaussianState(posterior.mean[active_mask], posterior.covariance[active_mask])\n\n # Create new track for every unmatch detection\n measures[links[:, 1]] = torch.nan\n unmatched_measures = measures[~torch.isnan(measures).squeeze().any(dim=-1)]\n\n if not unmatched_measures.numel():\n self.state = posterior\n self.active_tracks = still_active\n return\n\n # Initial state at measures,. Unmeasured state ([velocity, ]acceleration, jerk) are initialize at 0\n # Variance for unmeasured state is the process_noise\n # Variance for measured state is the measurement_noise\n unmatched_state = GaussianState(\n torch.zeros((unmatched_measures.shape[0], self.kalman_filter.state_dim, 1)),\n torch.cat([self.kalman_filter.process_noise[None]] * unmatched_measures.shape[0]),\n )\n unmatched_state.mean[:, : unmatched_measures.shape[1]] = unmatched_measures\n unmatched_state.covariance[\n :, : unmatched_measures.shape[1], : unmatched_measures.shape[1]\n ] = self.kalman_filter.measurement_noise\n\n # Create a new active track for each new state created\n for i in range(unmatched_measures.shape[0]):\n still_active.append(\n PartialTrack(\n len(self.tracks) + len(still_active),\n frame_id,\n unmatched_state.mean[i],\n unmatched_state.covariance[i],\n unmatched_measures[i],\n )\n )\n\n # State is the posterior for all active tracks (concatenation of new tracks with old kept ones)\n self.active_tracks = still_active\n self.state = GaussianState(\n torch.cat((posterior.mean, unmatched_state.mean)),\n torch.cat((posterior.covariance, unmatched_state.covariance)),\n )"
},
{
"identifier": "PartialTrack",
"path": "src/skt.py",
"snippet": "class PartialTrack:\n \"\"\"Partial track class\n\n Partial tracks are created for each unlinked detections, and then updated with following detections.\n It requires CONFIRMED_AT consecutive detections to confirm the tracks (INITIATED => CONFIRMED). If a miss detection\n occurs, it deletes it (INITIATED => DELETED).\n\n Once confirmed, it is resilient to miss detections, waiting MAX_NON_MEASURE frames before ending the track\n (CONFIRMED => ENDED)\n\n Will also store the kalman data for analysis.\n \"\"\"\n\n MAX_NON_MEASURE = 3\n CONFIRMED_AT = 3\n\n class TrackState(enum.IntEnum):\n INITIATED = 0\n CONFIRMED = 1\n ENDED = 2\n DELETED = 3\n\n def __init__(\n self,\n track_id: int,\n start: int,\n mean: torch.Tensor,\n covariance: torch.Tensor,\n measure: torch.Tensor,\n points=(0, 1),\n ) -> None:\n self._points = points # Points data in state\n self.track_id = track_id\n self.start = start\n self.track_state = PartialTrack.TrackState.INITIATED\n self.last_measurement = 0\n self._mean = [mean.clone()]\n self._covariance = [covariance.clone()]\n self._measure = [measure.clone()]\n\n def __len__(self) -> int:\n return len(self._mean) - self.last_measurement\n\n def is_active(self) -> bool:\n return self.track_state < 2\n\n def update(self, mean: torch.Tensor, covariance: torch.Tensor, measure: Optional[torch.Tensor]) -> None:\n \"\"\"Should be called only if the track is active\"\"\"\n self._mean.append(mean.clone())\n self._covariance.append(covariance.clone())\n\n if measure is None: # Not associated with a measure\n self._measure.append(torch.full_like(self._measure[-1], torch.nan))\n self.last_measurement += 1\n\n if self.track_state == PartialTrack.TrackState.INITIATED:\n self.track_state = PartialTrack.TrackState.DELETED\n\n elif self.last_measurement >= self.MAX_NON_MEASURE: # Could also check the width of the state covariance\n self.track_state = PartialTrack.TrackState.ENDED\n\n return\n\n self._measure.append(measure.clone())\n self.last_measurement = 0\n\n if self.track_state == PartialTrack.TrackState.INITIATED:\n if len(self) >= self.CONFIRMED_AT:\n self.track_state = PartialTrack.TrackState.CONFIRMED\n\n @property\n def points(self) -> torch.Tensor:\n return torch.cat([mean[None, self._points, 0] for mean in self._mean[: len(self)]])"
},
{
"identifier": "constant_koft_filter",
"path": "src/koft.py",
"snippet": "def constant_koft_filter(\n pos_std: torch.Tensor, vel_std: torch.Tensor, process_std: torch.Tensor, dim=2, order=1\n) -> KalmanFilter:\n \"\"\"Create a constant Velocity/Acceleration/Jerk Kalman Filter with pos and velocity measurements\n\n Create a kalman filter with a state containing the positions on each dimension (x, y, z, ...)\n with their derivatives up to `order`. The order-th derivatives are supposed constant.\n\n Let x be the positions for each dim and x^i the i-th derivatives of these positions\n Prediction follows:\n x^i_{t+1} = x^i_t + x^{i+1}_t, for i < order\n x^order_{t+1} = x^order_t\n\n Args:\n measurement_std (torch.Tensor): Std of the measurements\n 99.7% of measurements should fall within 3 std of the true position\n Shape: Broadcastable to dim, dtype: float64\n process_std (torch.Tensor): Process noise, a typical value is maximum diff between two consecutive\n order-th derivative. (Eg: for constant velocity -> Maximum acceleration between two frames)\n Shape: Broadcastable to dim, dtype: float64\n dim (int): Dimension of the motion (1d, 2d, 3d, ...)\n Default: 2\n order (int): Order of the filer (The order-th derivatives are constants)\n Default: 1 (Constant velocity)\n\n \"\"\"\n\n assert order >= 1, \"Velocity is measured and has to be set\"\n\n measurement_std = torch.cat((torch.broadcast_to(pos_std, (dim,)), torch.broadcast_to(vel_std, (dim,))))\n process_std = torch.broadcast_to(process_std, (dim,))\n\n measure_dim = 2 * dim\n state_dim = (order + 1) * dim\n\n # Measurement model\n # We measure position and velocity\n # Noise is independent and can have a different value in each direction\n measurement_matrix = torch.eye(measure_dim, state_dim)\n measurement_noise = torch.eye(measure_dim) * measurement_std**2\n\n # Process\n # Constant model\n # Noise in velocity estimation (which induce a noise in position estimation)\n process_matrix = torch.eye(state_dim) + torch.tensor(np.eye(state_dim, k=dim)).to(torch.float32)\n process_noise = torch.tensor(\n filterpy.common.Q_discrete_white_noise(order + 1, block_size=dim, order_by_dim=False)\n ).to(torch.float32) * torch.cat([process_std**2] * (order + 1))\n\n return KalmanFilter(process_matrix, measurement_matrix, process_noise, measurement_noise)"
},
{
"identifier": "OptFlowExtraction",
"path": "src/koft.py",
"snippet": "class OptFlowExtraction(enum.Enum):\n \"\"\"Extraction of optical flow from different positions\"\"\"\n\n DETECTED = 0\n POSTERIOR = 1\n PRIOR = 2"
},
{
"identifier": "SingleUpdateKOFTracker",
"path": "src/koft.py",
"snippet": "class SingleUpdateKOFTracker(SimpleKalmanTracker):\n \"\"\"Kalman and Optical Flow tracker with a single update\n\n Update velocities only for matched tracks and measyre velocity from detected positions\n \"\"\"\n\n __ALWAYS_UPDATE_VEL = False\n\n def __init__(self, kalman_filter: KalmanFilter, opt_flow: OptFlow, match_cfg: MatchingConfig) -> None:\n super().__init__(kalman_filter, match_cfg)\n self.opt_flow = opt_flow\n self.flow = np.zeros((1, 1, 2))\n\n def run(\n self, video: Iterable[np.ndarray], detections_sequence: Collection[byotrack.Detections]\n ) -> Collection[byotrack.Track]:\n assert isinstance(video, Sequence), \"Only indexable videos are supported\"\n\n # Reset tracks and states\n self.tracks = []\n self.active_tracks = []\n self.state = GaussianState(\n torch.zeros((0, self.kalman_filter.state_dim, 1)),\n torch.zeros((0, self.kalman_filter.state_dim, self.kalman_filter.state_dim)),\n )\n\n # Extract initial frame and prepare for optflow\n frame = video[next(iter(detections_sequence)).frame_id][..., 0]\n src = self.opt_flow.prepare(frame)\n\n for detections in tqdm.tqdm(detections_sequence):\n try:\n # We could compute flow from t-1 to t, or t-1 to t+1\n # But it is much better to compute flow from\n # frame = video[max(detections.frame_id - 1, 0)]\n # src = self.opt_flow.prepare(frame)\n # frame = video[detections.frame_id][..., 0]\n frame = video[detections.frame_id + 1][..., 0]\n except IndexError:\n pass\n\n dest = self.opt_flow.prepare(frame)\n self.flow = self.opt_flow.calc(src, dest) # / 2 if computed from t-1 to t+1\n\n self.update(detections)\n\n src = dest\n\n tracks = []\n for track in self.tracks + self.active_tracks:\n if track.track_state in (track.TrackState.DELETED, track.TrackState.INITIATED):\n continue # Ignore unconfirmed tracks\n tracks.append(\n byotrack.Track(\n track.start,\n track.points,\n track.track_id,\n )\n )\n return tracks\n\n def update(self, detections: byotrack.Detections):\n prior = self.kalman_filter.predict(self.state)\n projection = self.kalman_filter.project(prior)\n positions = detections.position[..., None].clone() # Shape m, d, 1\n\n # Measures = positions + velocities\n velocities = self.opt_flow.flow_at(self.flow, positions[..., 0].numpy().astype(np.float64), self.opt_flow.scale)\n measures = torch.cat([positions, torch.tensor(velocities[..., None]).to(torch.float32)], dim=1)\n\n # Association\n links = self.match(projection, measures)\n\n if self.__ALWAYS_UPDATE_VEL: # Single update for everyone even unmatched tracks (updated with inf pos cov)\n # Add measures for unlinked state\n prior_velocities = self.opt_flow.flow_at(\n self.flow, prior.mean[:, :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n all_measures = torch.cat(\n [prior.mean[:, :2], torch.tensor(prior_velocities[..., None]).to(torch.float32)], dim=1\n )\n all_measures[links[:, 0]] = measures[links[:, 1]]\n\n # For unmatched tracks, uncertainty on measurements (which is the prior here) is set to inf\n # Note that dropping this helps => Future investigation here\n cov = projection.covariance.clone()\n projection.covariance[:, 0, 0] = torch.inf\n projection.covariance[:, 1, 1] = torch.inf\n projection.covariance[links[:, 0]] = cov[links[:, 0]]\n projection.precision = None\n\n # Update linked kalman filter\n posterior = self.kalman_filter.update(\n prior,\n all_measures,\n projection,\n )\n else: # Classic single update\n # Update linked kalman filter\n posterior = self.kalman_filter.update(\n GaussianState(prior.mean[links[:, 0]], 
prior.covariance[links[:, 0]]),\n measures[links[:, 1]],\n GaussianState(\n projection.mean[links[:, 0]],\n projection.covariance[links[:, 0]],\n projection.precision[links[:, 0]] if projection.precision is not None else None,\n ),\n )\n\n # Take prior by default if non-linked\n prior.mean[links[:, 0]] = posterior.mean\n prior.covariance[links[:, 0]] = posterior.covariance\n posterior = prior\n\n self._handle_tracks(posterior, measures, links, detections.frame_id)"
},
{
"identifier": "TwoUpdateKOFTracker",
"path": "src/koft.py",
"snippet": "class TwoUpdateKOFTracker(SingleUpdateKOFTracker):\n \"\"\"Kalman and Optical Flow tracker\"\"\"\n\n def __init__(\n self,\n kalman_filter: KalmanFilter,\n opt_flow: OptFlow,\n match_cfg: MatchingConfig,\n opt_flow_at=OptFlowExtraction.POSTERIOR,\n always_update_vel=True,\n ) -> None:\n super().__init__(kalman_filter, opt_flow, match_cfg)\n self.opt_flow_at = opt_flow_at\n self.always_update_vel = always_update_vel\n\n def update(self, detections: byotrack.Detections):\n projection = self.kalman_filter.project(\n self.state,\n # self.kalman_filter.measurement_matrix[:2], # Let's also project velocity (useful for matching)\n # self.kalman_filter.measurement_noise[:2, :2],\n )\n\n positions = detections.position[..., None].clone() # Shape m, d, 1\n\n # Association\n links = self.match(projection, positions)\n\n # First update (Update with associate detections positions)\n posterior = self.kalman_filter.update(\n GaussianState(self.state.mean[links[:, 0]], self.state.covariance[links[:, 0]]),\n positions[links[:, 1]],\n GaussianState(\n projection.mean[links[:, 0], :2],\n projection.covariance[links[:, 0], :2, :2],\n None, # /!\\ inv(cov[:2,:2]) != inv(cov)[:2, :2]\n ),\n self.kalman_filter.measurement_matrix[:2],\n self.kalman_filter.measurement_noise[:2, :2],\n )\n\n # Compute velocities\n velocities_measured = torch.tensor( # Measured velocities\n self.opt_flow.flow_at(self.flow, positions[..., 0].numpy().astype(np.float64), self.opt_flow.scale)\n )[..., None].to(torch.float32)\n\n if self.opt_flow_at == OptFlowExtraction.DETECTED:\n velocities = velocities_measured[links[:, 1]]\n elif self.opt_flow_at == OptFlowExtraction.POSTERIOR:\n velocities = torch.tensor(\n self.opt_flow.flow_at(\n self.flow, posterior.mean[..., :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n )[..., None].to(torch.float32)\n velocities_measured[links[:, 1]] = velocities\n else: # Prior\n velocities = torch.tensor(\n self.opt_flow.flow_at(\n self.flow, projection.mean[links[:, 0], :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n )[..., None].to(torch.float32)\n velocities_measured[links[:, 1]] = velocities\n\n # Update matched tracks with velocities\n posterior = self.kalman_filter.update(\n posterior,\n velocities,\n None,\n self.kalman_filter.measurement_matrix[2:],\n self.kalman_filter.measurement_noise[2:, 2:],\n )\n\n measures = torch.cat([positions, velocities_measured], dim=1)\n\n if self.always_update_vel:\n velocities = torch.tensor(\n self.opt_flow.flow_at(\n self.flow, projection.mean[:, :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n )[..., None].to(torch.float32)\n self.state = self.kalman_filter.update( # Update unmatched tracks with velocities\n self.state,\n velocities,\n None,\n self.kalman_filter.measurement_matrix[2:],\n self.kalman_filter.measurement_noise[2:, 2:],\n )\n\n # Take prior by default if non-linked, else posterior\n self.state.mean[links[:, 0]] = posterior.mean\n self.state.covariance[links[:, 0]] = posterior.covariance\n\n self._handle_tracks(self.state, measures, links, detections.frame_id)\n\n self.state = self.kalman_filter.predict(self.state)"
},
{
"identifier": "farneback",
"path": "src/optical_flow.py",
"snippet": "class OptFlow:\n def __init__(self, method: Callable[[np.ndarray, np.ndarray], np.ndarray], threshs=(0.0, 1.0), scale=2, blur=0.0):\n def prepare(self, frame: np.ndarray) -> np.ndarray:\n def calc(self, source: np.ndarray, destination: np.ndarray) -> np.ndarray:\n def flow_at(flow: np.ndarray, points: np.ndarray, scale: int) -> np.ndarray:\n def transform(self, flow: np.ndarray, points: np.ndarray) -> np.ndarray:"
},
{
"identifier": "enforce_all_seeds",
"path": "src/utils.py",
"snippet": "def enforce_all_seeds(seed: int, strict=True):\n \"\"\"Enforce all the seeds\n\n If strict you may have to define the following env variable:\n CUBLAS_WORKSPACE_CONFIG=:4096:8 (Increase a bit the memory foot print ~25Mo)\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n if strict:\n torch.backends.cudnn.benchmark = False # By default should already be to False\n torch.use_deterministic_algorithms(True)"
}
] | import dataclasses
import enum
import pathlib
import dacite
import torch
import tqdm # type: ignore
import yaml # type: ignore
import byotrack
from typing import Collection, List
from byotrack.implementation.detector.wavelet import WaveletDetector
from byotrack.implementation.linker.icy_emht import EMHTParameters, IcyEMHTLinker, Motion
from byotrack.implementation.linker.trackmate.trackmate import TrackMateLinker, TrackMateParameters
from byotrack.implementation.refiner.interpolater import ForwardBackwardInterpolater
from ..detector import FakeDetector
from ..metrics.detections import DetectionMetric
from ..metrics.tracking import compute_tracking_metrics
from ..skt import constant_kalman_filter, Dist, Method, MatchingConfig, SimpleKalmanTracker, PartialTrack
from ..koft import constant_koft_filter, OptFlowExtraction, SingleUpdateKOFTracker, TwoUpdateKOFTracker
from ..optical_flow import farneback
from ..utils import enforce_all_seeds | 9,608 |
class DetectionMethod(enum.Enum):
WAVELET = "wavelet"
FAKE = "fake"
@dataclasses.dataclass
class WaveletConfig:
k: float = 3.0
scale: int = 1
min_area: float = 10.0
@dataclasses.dataclass
class FakeConfig:
fpr: float = 0.1 # False positive rate (spurious detections)
fnr: float = 0.2 # False negative rate (missed detections)
measurement_noise: float = 1.0
@dataclasses.dataclass
class DetectionConfig:
detector: DetectionMethod
wavelet: WaveletConfig
fake: FakeConfig
# interactive = False # Could tweak the detector parameters interactively ?
def create_detector(self, mu: torch.Tensor) -> byotrack.Detector:
if self.detector == DetectionMethod.WAVELET:
return WaveletDetector(self.wavelet.scale, self.wavelet.k, self.wavelet.min_area)
return FakeDetector(mu, self.fake.measurement_noise, self.fake.fpr, self.fake.fnr)
@dataclasses.dataclass
class KalmanConfig:
detection_noise: float
of_noise: float
process_noise: float # Accounts for mis-estimation of the process (motion) model
dist: Dist
matching_method: Method
always_update_velocities: bool = True
dim: int = 2
order: int = 1
class TrackingMethod(enum.Enum):
SKT = "skt"
KOFT = "koft"
KOFTmm = "koft--"
KOFTpp = "koft++"
TRACKMATE = "trackmate"
TRACKMATE_KF = "trackmate-kf"
EMHT = "emht"
@dataclasses.dataclass
class ExperimentConfig:
seed: int
simulation_path: pathlib.Path
tracking_method: TrackingMethod
detection: DetectionConfig
kalman: KalmanConfig
icy_path: pathlib.Path
fiji_path: pathlib.Path
def create_linker(self, thresh: float) -> byotrack.Linker:
"""Create a linker"""
if self.tracking_method is TrackingMethod.EMHT:
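# EMHT (multiple-hypothesis tracking) is delegated to the Icy plugin; thresh is passed as the gating factor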
return IcyEMHTLinker(
self.icy_path,
EMHTParameters(
gate_factor=thresh,
motion=Motion.MULTI,
tree_depth=2,
),
)
if self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF):
# As with the Kalman trackers, we allow a gap of 2 consecutive missed detections
# In that case, we allow gap closing up to 1.5 * thresh
return TrackMateLinker(
self.fiji_path,
TrackMateParameters(
max_frame_gap=PartialTrack.MAX_NON_MEASURE,
linking_max_distance=thresh,
gap_closing_max_distance=thresh * 1.5,
kalman_search_radius=thresh if self.tracking_method is TrackingMethod.TRACKMATE_KF else None,
),
)
if self.tracking_method is TrackingMethod.SKT:
|
class DetectionMethod(enum.Enum):
WAVELET = "wavelet"
FAKE = "fake"
@dataclasses.dataclass
class WaveletConfig:
k: float = 3.0
scale: int = 1
min_area: float = 10.0
@dataclasses.dataclass
class FakeConfig:
fpr: float = 0.1 # False positive rate (spurious detections)
fnr: float = 0.2 # False negative rate (missed detections)
measurement_noise: float = 1.0
@dataclasses.dataclass
class DetectionConfig:
detector: DetectionMethod
wavelet: WaveletConfig
fake: FakeConfig
# interactive = False # Could tweak the detector parameters interactively ?
def create_detector(self, mu: torch.Tensor) -> byotrack.Detector:
if self.detector == DetectionMethod.WAVELET:
return WaveletDetector(self.wavelet.scale, self.wavelet.k, self.wavelet.min_area)
return FakeDetector(mu, self.fake.measurement_noise, self.fake.fpr, self.fake.fnr)
@dataclasses.dataclass
class KalmanConfig:
detection_noise: float
of_noise: float
process_noise: float # Accounts for mis-estimation of the process (motion) model
dist: Dist
matching_method: Method
always_update_velocities: bool = True
dim: int = 2
order: int = 1
class TrackingMethod(enum.Enum):
SKT = "skt"
KOFT = "koft"
KOFTmm = "koft--"
KOFTpp = "koft++"
TRACKMATE = "trackmate"
TRACKMATE_KF = "trackmate-kf"
EMHT = "emht"
@dataclasses.dataclass
class ExperimentConfig:
seed: int
simulation_path: pathlib.Path
tracking_method: TrackingMethod
detection: DetectionConfig
kalman: KalmanConfig
icy_path: pathlib.Path
fiji_path: pathlib.Path
def create_linker(self, thresh: float) -> byotrack.Linker:
"""Create a linker"""
if self.tracking_method is TrackingMethod.EMHT:
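# EMHT (multiple-hypothesis tracking) is delegated to the Icy plugin; thresh is passed as the gating factor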
return IcyEMHTLinker(
self.icy_path,
EMHTParameters(
gate_factor=thresh,
motion=Motion.MULTI,
tree_depth=2,
),
)
if self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF):
# As with the Kalman trackers, we allow a gap of 2 consecutive missed detections
# In that case, we allow gap closing up to 1.5 * thresh
return TrackMateLinker(
self.fiji_path,
TrackMateParameters(
max_frame_gap=PartialTrack.MAX_NON_MEASURE,
linking_max_distance=thresh,
gap_closing_max_distance=thresh * 1.5,
kalman_search_radius=thresh if self.tracking_method is TrackingMethod.TRACKMATE_KF else None,
),
)
if self.tracking_method is TrackingMethod.SKT: | kalman_filter = constant_kalman_filter( | 3 | 2023-11-10 10:18:39+00:00 | 12k |
david9dragon9/LOMOLite | lomo/lomo_base.py | [
{
"identifier": "LOMO",
"path": "lomo/lomo_orig.py",
"snippet": "class LOMO(Optimizer):\n \"\"\"\n 一个自定义的优化器类LOMO,用于在分布式训练中的梯度更新。\n\n 该类实现两个梯度更新函数 :meth:`fuse_update` 和 :meth:`fuse_update_zero3`,分别用于非ZeRO和ZeRO模式下的梯度更新。\n\n :param model: 待优化的模型\n :param lr: 学习率,默认值为1e-3\n :param clip_grad_norm: 梯度裁剪的范数阈值\n\n .. note::\n\n clip_grad_norm须为正数\n\n :param clip_grad_value: 梯度裁剪的值域阈值\n \"\"\"\n\n def __init__(self, model, lr=1e-3, clip_grad_norm=None, clip_grad_value=None):\n self.model = model\n self.lr = lr\n self.local_rank = int(os.environ[\"LOCAL_RANK\"])\n self.world_size = dist.get_world_size()\n self.clip_grad_norm = clip_grad_norm\n self.clip_grad_value = clip_grad_value\n\n # for grad norm\n if self.clip_grad_norm is not None and self.clip_grad_norm <= 0:\n raise ValueError(\n f\"clip_grad_norm should be positive, got {self.clip_grad_norm}.\"\n )\n self.gather_norm = False\n self.grad_norms = []\n self.clip_coef = None\n\n # check if zero3 is enabled\n p0 = list(self.model.parameters())[0]\n if hasattr(p0, \"ds_tensor\"): # zero3 is enabled\n self.grad_func = self.fuse_update_zero3()\n else:\n self.grad_func = self.fuse_update()\n # check if fp16 is enabled\n if False: # p0.dtype == torch.float16:\n self.loss_scaler = DynamicLossScaler(\n init_scale=2**16,\n ) # TODO: add args\n if self.clip_grad_norm is None:\n raise ValueError(\n \"Loss scaling is recommended to be used with grad norm to get better performance.\"\n )\n else:\n self.loss_scaler = None\n\n # register hook function, which will be called through the backward process\n for n, p in self.model.named_parameters():\n if p.requires_grad:\n p.register_hook(self.grad_func)\n defaults = dict(\n lr=lr, clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value\n )\n super(LOMO, self).__init__(self.model.parameters(), defaults)\n\n def fuse_update(self):\n \"\"\"\n 在非ZeRO模式下更新模型参数的梯度。\n\n :return: func,一个闭包函数,用于更新模型参数的梯度\n \"\"\"\n\n def func(x):\n \"\"\"\n 闭包函数,用于更新模型参数的梯度。\n \"\"\"\n with torch.no_grad():\n for n, p in self.model.named_parameters():\n if p.requires_grad and p.grad is not None:\n if self.loss_scaler:\n if (\n self.loss_scaler.has_overflow_serial\n or self.loss_scaler._has_inf_or_nan(p.grad)\n ):\n # if the overflow is detected, drop the gradient\n p.grad = None\n self.loss_scaler.has_overflow_serial = True\n break\n grad_fp32 = p.grad.to(torch.float32)\n p.grad = None\n if self.loss_scaler:\n grad_fp32.div_(self.loss_scaler.loss_scale)\n if self.gather_norm:\n # we adopt two backward pass for gradient norm compuation and parameter update, respectively.\n self.grad_norms.append(torch.norm(grad_fp32, 2.0))\n else:\n if (\n self.clip_grad_value is not None\n and self.clip_grad_value > 0\n ):\n # Clipping gradients by their value\n grad_fp32.clamp_(\n min=-self.clip_grad_value, max=self.clip_grad_value\n )\n if (\n self.clip_grad_norm is not None\n and self.clip_grad_norm > 0\n and self.clip_coef is not None\n ):\n # Normalize the gradient according to its norm (computed in another pass)\n grad_fp32.mul_(self.clip_coef)\n p_fp32 = p.data.to(torch.float32)\n p_fp32.add_(grad_fp32, alpha=-self.lr)\n p.data.copy_(p_fp32)\n\n return x\n\n return func\n\n def fuse_update_zero3(self):\n \"\"\"\n 在ZeRO模式下更新模型参数的梯度。\n\n :return: func,一个闭包函数,用于更新模型参数的梯度。\n \"\"\"\n\n def func(x):\n with torch.no_grad():\n for n, p in self.model.named_parameters():\n if p.grad is not None:\n torch.distributed.all_reduce(\n p.grad, op=torch.distributed.ReduceOp.AVG, async_op=False\n )\n if self.loss_scaler:\n if (\n self.loss_scaler.has_overflow_serial\n or 
self.loss_scaler._has_inf_or_nan(p.grad)\n ):\n # if the overflow is detected, drop the gradient\n p.grad = None\n self.loss_scaler.has_overflow_serial = True\n break\n\n grad_fp32 = p.grad.to(torch.float32)\n p.grad = None\n param_fp32 = p.ds_tensor.to(torch.float32)\n if self.loss_scaler:\n grad_fp32.div_(self.loss_scaler.loss_scale)\n\n if self.gather_norm:\n # we adopt two backward pass for gradient norm compuation and parameter update, respectively.\n self.grad_norms.append(torch.norm(grad_fp32, 2.0))\n else: # update param\n one_dim_grad_fp32 = grad_fp32.view(-1)\n partition_size = p.ds_tensor.numel()\n start = partition_size * self.local_rank\n end = min(start + partition_size, grad_fp32.numel())\n partitioned_grad_fp32 = one_dim_grad_fp32.narrow(\n 0, start, end - start\n )\n\n if self.clip_grad_value is not None:\n # Clipping gradients by their value\n partitioned_grad_fp32.clamp_(\n min=-self.clip_grad_value, max=self.clip_grad_value\n )\n if (\n self.clip_grad_norm is not None\n and self.clip_grad_norm > 0\n and self.clip_coef is not None\n ):\n # Normalize the gradient according to its norm (computed in another pass)\n partitioned_grad_fp32.mul_(self.clip_coef)\n\n partitioned_p = param_fp32.narrow(0, 0, end - start)\n partitioned_p.add_(partitioned_grad_fp32, alpha=-self.lr)\n p.ds_tensor[: end - start] = partitioned_p\n return x\n\n return func\n\n def fused_backward(self, loss, lr):\n \"\"\"\n 执行一步反向传播并更新模型的梯度。\n\n :param loss: 模型的loss值\n :param lr: 学习率\n \"\"\"\n self.lr = lr\n # Users need call grad_norm themselves and then call backward_step\n if (\n self.clip_grad_norm is not None\n and self.clip_grad_norm > 0\n and self.clip_coef is None\n ):\n raise ValueError(\n \"clip_grad_norm is not None, but clip_coef is None. \"\n \"Please call optimizer.grad_norm() before optimizer.fused_backward().\"\n )\n if self.loss_scaler:\n loss = loss * self.loss_scaler.loss_scale\n loss.backward()\n # update the last parameter since the last parameter in the computaiton graph is not ready when calling hook functions\n # the argument of grad_func is just a placeholder, and it can be anything.\n self.grad_func(0)\n\n def grad_norm(self, loss):\n \"\"\"\n 计算梯度的范数。\n\n :param loss: 模型的loss值\n \"\"\"\n self.gather_norm = True\n self.grad_norms = []\n if self.loss_scaler:\n self.loss_scaler.has_overflow_serial = False\n loss = loss * self.loss_scaler.loss_scale\n loss.backward(retain_graph=True)\n # update the last parameter since the last parameter in the computaiton graph is not ready when calling hook functions\n # the argument of grad_func is just a placeholder, and it can be anything.\n self.grad_func(0)\n\n if self.loss_scaler and self.loss_scaler.has_overflow_serial:\n self.loss_scaler.update_scale(overflow=True)\n with torch.no_grad(): # clear gradients\n for n, p in self.model.named_parameters():\n p.grad = None\n return\n\n with torch.no_grad():\n # The norm is computed over all gradients together, as if they were\n # concatenated into a single vector. Gradients are modified in-place.\n self.grad_norms = torch.stack(self.grad_norms)\n\n total_norm = torch.norm(self.grad_norms, 2.0)\n self.clip_coef = float(self.clip_grad_norm) / (total_norm + 1e-6)\n self.clip_coef = torch.clamp(self.clip_coef, max=1.0)\n self.gather_norm = False"
},
{
"identifier": "AdaLomo",
"path": "lomo/adalomo_orig.py",
"snippet": "class AdaLomo(Optimizer):\n \"\"\"\n 一个自定义的优化器类AdaLomo,用于在分布式训练中的梯度更新。\n\n 该类实现两个梯度更新函数 :meth:`fuse_update` 和 :meth:`fuse_update_zero3`,分别用于非ZeRO和ZeRO模式下的梯度更新。\n\n :param model: 待优化的模型\n :param lr: 学习率,默认值为1e-3\n :param eps: 正则化系数。eps[0]防止梯度平方太小,eps[1]用于在根据参数的RMS放缩学习率时防止步长太大\n :param clip_threshold: 归一化update矩阵时的阈值\n :param decay_rate: 梯度平方移动平均的衰减率\n :param clip_grad_norm: 梯度裁剪的范数阈值\n\n .. note::\n\n clip_grad_norm须为正数\n :param clip_grad_value: 梯度裁剪的值域阈值\n :param weight_decay: 权重衰减系数,默认值为0.0\n :param loss_scale: 损失缩放系数,可以用来提高训练精度,但是太大可能会导致nan\n \"\"\"\n\n def __init__(\n self,\n model,\n lr=1e-3,\n loss_scale=2**10,\n eps=(1e-30, 1e-3),\n clip_threshold=1.0,\n decay_rate=-0.8,\n clip_grad_norm=None,\n clip_grad_value=None,\n weight_decay=0.0,\n ):\n self.model = model\n self.lr = lr\n self.clip_grad_norm = clip_grad_norm\n self.clip_grad_value = clip_grad_value\n self.weight_decay = weight_decay\n self.loss_scale = loss_scale\n if self.weight_decay > 0.0:\n self.do_weight_decay = True\n else:\n self.do_weight_decay = False\n self.eps = eps\n self.step_num = 0\n self.decay_rate = decay_rate\n self.clip_threshold = clip_threshold\n\n # for grad norm\n if self.clip_grad_norm is not None and self.clip_grad_norm <= 0:\n raise ValueError(\n f\"clip_grad_norm should be positive, got {self.clip_grad_norm}.\"\n )\n self.gather_norm = False\n self.grad_norms = []\n self.clip_coef = None\n\n # check if zero3 is enabled\n self.zero3_enabled = True # is_deepspeed_zero3_enabled()\n if self.zero3_enabled: # zero3 is enabled\n self.grad_func = self.fuse_update_zero3()\n else:\n self.grad_func = self.fuse_update()\n\n self.exp_avg_sq = {}\n self.exp_avg_sq_row = {}\n self.exp_avg_sq_col = {}\n\n # register hook function, which will be called through the backward process\n for n, p in self.model.named_parameters():\n if len(p.ds_shape) == 1:\n self.exp_avg_sq[n] = torch.zeros(\n p.ds_shape[0], dtype=torch.float32\n ).cuda()\n else:\n self.exp_avg_sq_row[n] = torch.zeros(\n p.ds_shape[0], dtype=torch.float32\n ).cuda()\n self.exp_avg_sq_col[n] = torch.zeros(\n p.ds_shape[1], dtype=torch.float32\n ).cuda()\n\n if p.requires_grad:\n p.register_hook(self.grad_func)\n defaults = dict(\n lr=lr,\n eps=eps,\n weight_decay=weight_decay,\n clip_grad_norm=clip_grad_norm,\n clip_grad_value=clip_grad_value,\n )\n super(AdaLomo, self).__init__(self.model.parameters(), defaults)\n self.dp_rank = 0\n\n @staticmethod\n def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):\n # copy from fairseq's adafactor implementation:\n # https://github.com/huggingface/transformers/blob/8395f14de6068012787d83989c3627c3df6a252b/src/transformers/optimization.py#L505\n r_factor = (\n (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True))\n .rsqrt_()\n .unsqueeze(-1)\n )\n c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()\n return torch.mul(r_factor, c_factor)\n\n @staticmethod\n def _rms(tensor):\n return tensor.norm(2) / (tensor.numel() ** 0.5)\n\n def fuse_update(self):\n \"\"\"\n 在非ZeRO模式下更新模型参数的梯度。\n\n :return: func,一个闭包函数,用于更新模型参数的梯度\n \"\"\"\n\n def func(x):\n \"\"\"\n 闭包函数,用于更新模型参数的梯度。\n \"\"\"\n with torch.no_grad():\n for n, p in self.model.named_parameters():\n if p.requires_grad and p.grad is not None:\n grad_fp32 = p.grad.to(torch.float32)\n p.grad = None\n if self.loss_scale:\n grad_fp32.div_(self.loss_scale)\n if self.gather_norm:\n # we adopt two backward pass for gradient norm computation and parameter update, respectively.\n self.grad_norms.append(torch.norm(grad_fp32, 2.0))\n else:\n # grad clip or norm\n if 
(\n self.clip_grad_value is not None\n and self.clip_grad_value > 0\n ):\n # Clipping gradients by their value\n grad_fp32.clamp_(\n min=-self.clip_grad_value, max=self.clip_grad_value\n )\n if (\n self.clip_grad_norm is not None\n and self.clip_grad_norm > 0\n and self.clip_coef is not None\n ):\n # Normalize the gradient according to its norm (computed in another pass)\n grad_fp32.mul_(self.clip_coef)\n\n beta2t = 1.0 - math.pow(self.step_num, self.decay_rate)\n update = (grad_fp32**2) + self.eps[0]\n\n if len(p.data.shape) > 1:\n self.exp_avg_sq_row[n].mul_(beta2t).add_(\n update.mean(dim=-1), alpha=1.0 - beta2t\n )\n self.exp_avg_sq_col[n].mul_(beta2t).add_(\n update.mean(dim=-2), alpha=1.0 - beta2t\n )\n update = self._approx_sq_grad(\n self.exp_avg_sq_row[n], self.exp_avg_sq_col[n]\n )\n update.mul_(grad_fp32)\n else:\n self.exp_avg_sq[n].mul_(beta2t).add_(\n update, alpha=1.0 - beta2t\n )\n update = self.exp_avg_sq[n].rsqrt().mul_(grad_fp32)\n\n update.div_(\n (self._rms(update) / self.clip_threshold).clamp_(\n min=1.0\n )\n )\n\n p_fp32 = p.data.to(torch.float32)\n p_rms = torch.norm(p_fp32, 2.0) / math.sqrt(p.numel())\n lr = self.lr\n param_scale = max(self.eps[1], p_rms)\n lr = lr * param_scale\n\n if self.do_weight_decay:\n p_fp32.mul_(1.0 - lr * self.weight_decay)\n p_fp32.add_(update, alpha=-lr)\n p.data.copy_(p_fp32)\n\n return x\n\n return func\n\n def fuse_update_zero3(self):\n \"\"\"\n 在ZeRO模式下更新模型参数的梯度。\n\n :return: func,一个闭包函数,用于更新模型参数的梯度。\n \"\"\"\n\n def func(x):\n with torch.no_grad():\n for n, p in self.model.named_parameters():\n if p.grad is not None:\n torch.distributed.all_reduce(\n p.grad, op=torch.distributed.ReduceOp.AVG, async_op=False\n )\n\n grad_fp32 = p.grad.to(torch.float32)\n p.grad = None\n if self.loss_scale:\n grad_fp32.div_(self.loss_scale)\n\n if self.gather_norm:\n # we adopt two backward pass for gradient norm computation and parameter update, respectively.\n self.grad_norms.append(torch.norm(grad_fp32, 2.0))\n else: # update param\n partition_size = p.ds_tensor.numel()\n start = partition_size * self.dp_rank\n end = min(start + partition_size, grad_fp32.numel())\n\n if self.clip_grad_value is not None:\n # Clipping gradients by their value\n grad_fp32.clamp_(\n min=-self.clip_grad_value, max=self.clip_grad_value\n )\n if (\n self.clip_grad_norm is not None\n and self.clip_grad_norm > 0\n and self.clip_coef is not None\n ):\n # Normalize the gradient according to its norm (computed in another pass)\n grad_fp32.mul_(self.clip_coef)\n\n beta2t = 1.0 - math.pow(self.step_num, self.decay_rate)\n update = (grad_fp32**2) + self.eps[0] # 改成addcmul_\n\n if len(p.ds_shape) > 1:\n self.exp_avg_sq_row[n].mul_(beta2t).add_(\n update.mean(dim=-1), alpha=1.0 - beta2t\n )\n self.exp_avg_sq_col[n].mul_(beta2t).add_(\n update.mean(dim=-2), alpha=1.0 - beta2t\n )\n update = self._approx_sq_grad(\n self.exp_avg_sq_row[n], self.exp_avg_sq_col[n]\n )\n update.mul_(grad_fp32)\n else:\n self.exp_avg_sq[n].mul_(beta2t).add_(\n update, alpha=1.0 - beta2t\n )\n update = self.exp_avg_sq[n].rsqrt().mul_(grad_fp32)\n\n update.div_(\n (self._rms(update) / self.clip_threshold).clamp_(\n min=1.0\n )\n )\n\n one_dim_update = update.view(-1)\n partitioned_update = one_dim_update.narrow(\n 0, start, end - start\n )\n param_fp32 = p.ds_tensor.to(torch.float32)\n partitioned_p = param_fp32.narrow(0, 0, end - start)\n\n p_rms = torch.norm(partitioned_p, 2.0) ** 2\n dist.all_reduce(p_rms, op=torch.distributed.ReduceOp.SUM)\n p_rms = (p_rms / p.ds_numel).sqrt()\n\n lr = self.lr\n 
param_scale = max(self.eps[1], p_rms)\n lr = lr * param_scale\n\n if self.do_weight_decay:\n partitioned_p.mul_(1.0 - lr * self.weight_decay)\n partitioned_p.add_(partitioned_update, alpha=-lr)\n p.ds_tensor[: end - start] = partitioned_p\n\n return x\n\n return func\n\n def fused_backward(self, loss, lr):\n \"\"\"\n 执行一步反向传播并更新模型的梯度。\n\n :param loss: 模型的loss值\n :param lr: 学习率\n \"\"\"\n self.lr = lr\n if self.loss_scale:\n loss = loss * self.loss_scale\n self.step_num += 1\n loss.backward()\n # update the last parameter since the last parameter in the computaiton graph is not ready when calling hook functions\n # the argument of grad_func is just a placeholder, and it can be anything.\n self.grad_func(0)\n\n def grad_norm(self, loss):\n \"\"\"\n 计算梯度的范数。\n\n :param loss: 模型的loss值\n \"\"\"\n self.gather_norm = True\n self.grad_norms = []\n if self.loss_scale:\n loss = loss * self.loss_scale\n loss.backward(retain_graph=True)\n # update the last parameter since the last parameter in the computaiton graph is not ready when calling hook functions\n # the argument of grad_func is just a placeholder, and it can be anything.\n self.grad_func(0)\n\n with torch.no_grad():\n # The norm is computed over all gradients together, as if they were\n # concatenated into a single vector. Gradients are modified in-place.\n self.grad_norms = torch.stack(self.grad_norms)\n\n total_norm = torch.norm(self.grad_norms, 2.0)\n self.clip_coef = float(self.clip_grad_norm) / (total_norm + 1e-6)\n self.clip_coef = torch.clamp(self.clip_coef, max=1.0)\n self.gather_norm = False"
},
{
"identifier": "LearningRateScheduler",
"path": "lomo/lomo_utils.py",
"snippet": "class LearningRateScheduler:\n r\"\"\"\n Learning rate scheduler with warmup.\n\n :param warmup: if ``warmup`` is an integer, ``warmup`` stands for warmup steps, if ``warmup`` is a float,\n such as 0.1, then it stands for warmup_ratio.\n :param schedule: the learning rate will be adjusted according to ``schedule`` strategy,\n which can be: linear or constant.\n \"\"\"\n\n def __init__(\n self, warmup: float, schedule: str, learning_rate: float, n_steps: int = 0\n ):\n\n self.warmup = max(warmup, 0.0)\n self.schedule = schedule\n self.initial_lr = learning_rate\n\n if self.warmup > 1:\n self.warmup = self.warmup / n_steps\n self.t_steps = max(2, n_steps)\n\n if self.schedule == \"constant\":\n self.get_lr = self._get_constant_lr\n elif self.schedule == \"linear\":\n self.get_lr = self._get_linear_lr\n else:\n raise NotImplementedError(\"Only support 'linear', 'constant'.\")\n\n def _get_constant_lr(self, progress):\n if progress < self.warmup:\n return progress / self.warmup\n return 1\n\n def _get_linear_lr(self, progress):\n if progress < self.warmup:\n return progress / self.warmup\n return max((progress - 1.0) / (self.warmup - 1.0), 0.0)\n\n def step(self, global_step):\n progress = global_step / self.t_steps\n return self.initial_lr * self.get_lr(progress)"
},
{
"identifier": "DynamicLossScaler",
"path": "lomo/lomo_utils.py",
"snippet": "class DynamicLossScaler:\n def __init__(\n self,\n init_scale=2**32,\n scale_factor=2.0,\n scale_window=1000,\n min_scale=1,\n delayed_shift=1,\n consecutive_hysteresis=False,\n raise_error_at_min_scale=True,\n dtype=torch.half,\n ):\n self.cur_scale = init_scale\n self.cur_iter = 0\n self.last_overflow_iter = -1\n self.scale_factor = scale_factor\n self.scale_window = scale_window\n self.min_scale = min_scale\n self.delayed_shift = delayed_shift\n self.cur_hysteresis = delayed_shift\n self.consecutive_hysteresis = consecutive_hysteresis\n self.raise_error_at_min_scale = raise_error_at_min_scale\n self.dtype = dtype\n self.has_overflow_serial = False\n\n @property\n def loss_scale(self):\n return self.cur_scale\n\n # `x` is a torch.Tensor\n def _has_inf_or_nan(self, x):\n try:\n # if x is half, the .float() incurs an additional deep copy, but it's necessary if\n # Pytorch's .sum() creates a one-element tensor of the same type as x\n # (which is true for some recent version of pytorch).\n cpu_sum = float(x.float().sum())\n # More efficient version that can be used if .sum() returns a Python scalar\n # cpu_sum = float(x.sum())\n except RuntimeError as instance:\n # We want to check if inst is actually an overflow exception.\n # RuntimeError could come from a different error.\n # If so, we still want the exception to propagate.\n if \"value cannot be converted\" not in instance.args[0]:\n raise\n return True\n else:\n if cpu_sum in [float(\"inf\"), -float(\"inf\")] or cpu_sum != cpu_sum:\n return True\n return False\n\n # `overflow` is boolean indicating whether the gradient overflowed\n def update_scale(self, overflow):\n if overflow:\n # self.cur_scale /= self.scale_factor\n if self.delayed_shift == 1 or self.cur_hysteresis == 1:\n if (self.cur_scale == self.min_scale) and self.raise_error_at_min_scale:\n raise Exception(\n \"Current loss scale already at minimum - cannot decrease scale anymore. Exiting run.\"\n )\n else:\n next_scale = max(self.cur_scale / self.scale_factor, self.min_scale)\n if torch.distributed.get_rank() == 0:\n overflow_msg = f\"[deepspeed] OVERFLOW! Rank {torch.distributed.get_rank()} Skipping step.\"\n if self.dtype == torch.half:\n overflow_msg += f\" Attempted loss scale: {int(self.cur_scale)}, reducing to {int(next_scale)}\"\n print(overflow_msg)\n self.cur_scale = next_scale\n else:\n if torch.distributed.get_rank() == 0:\n overflow_msg = f\"[deepspeed] OVERFLOW! Rank {torch.distributed.get_rank()} Skipping step.\"\n if self.dtype == torch.half:\n overflow_msg += f\" Attempted loss scale: {int(self.cur_scale)}, but hysteresis is {self.cur_hysteresis}. Reducing hysteresis to {self.cur_hysteresis - 1}\"\n print(overflow_msg)\n self.cur_hysteresis -= 1\n self.last_overflow_iter = self.cur_iter\n else:\n if self.consecutive_hysteresis:\n if torch.distributed.get_rank() == 0:\n hysteresis_msg = f\"Consecutive hysteresis is enabled. Restoring hysteresis to {self.delayed_shift}\"\n print(hysteresis_msg)\n self.cur_hysteresis = self.delayed_shift\n if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:\n if not self.consecutive_hysteresis:\n self.cur_hysteresis = self.delayed_shift\n self.cur_scale *= self.scale_factor\n self.cur_iter += 1"
}
] | import torch
import sys
import os
import tqdm
import deepspeed
import deepspeed
import os
from transformers.deepspeed import HfDeepSpeedConfig
from transformers import AutoConfig
from collections import OrderedDict
from lomo.lomo_orig import LOMO
from lomo.adalomo_orig import AdaLomo
from lomo.lomo_utils import LearningRateScheduler, DynamicLossScaler
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator | 7,298 | # Source: https://github.com/OpenLMLab/LOMO
# Source: https://github.com/OpenLMLab/collie/tree/dev/collie
try:
# body omitted by this crop (presumably an optional/guarded import); `pass` added so the block parses
pass
except:
pass
def setup_lomo(model_name_or_path):
torch.set_default_dtype(torch.float16)
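# make tensors and model weights created from here on default to fp16 (half precision)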
ds_config = __file__.replace("lomo_base.py", "ds_config.json")
dschf = HfDeepSpeedConfig(ds_config)
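# NOTE: the HfDeepSpeedConfig instance is deliberately assigned and kept alive:
# transformers looks for a live instance to enable its DeepSpeed/ZeRO integration when the model is built later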
config = AutoConfig.from_pretrained(model_name_or_path)
config.gradient_checkpointing = True
return config
def create_lomo_lr_scheduler(
learning_rate=0.03,
n_steps=1000,
num_train_epochs=10,
warmup=0.1,
lr_scheduler_type="linear",
):
return LearningRateScheduler(
learning_rate=learning_rate,
warmup=warmup,
schedule=lr_scheduler_type,
n_steps=n_steps,
)
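# Illustrative usage (hypothetical values):
#   scheduler = create_lomo_lr_scheduler(learning_rate=3e-2, n_steps=1000, warmup=0.1)
#   lr_for_step = scheduler.step(global_step)  # LearningRateScheduler.step returns the scaled lr for this step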
name_to_lomo = {
| # Source: https://github.com/OpenLMLab/LOMO
# Source: https://github.com/OpenLMLab/collie/tree/dev/collie
try:
# body omitted by this crop (presumably an optional/guarded import); `pass` added so the block parses
pass
except:
pass
def setup_lomo(model_name_or_path):
torch.set_default_dtype(torch.float16)
ds_config = __file__.replace("lomo_base.py", "ds_config.json")
dschf = HfDeepSpeedConfig(ds_config)
config = AutoConfig.from_pretrained(model_name_or_path)
config.gradient_checkpointing = True
return config
def create_lomo_lr_scheduler(
learning_rate=0.03,
n_steps=1000,
num_train_epochs=10,
warmup=0.1,
lr_scheduler_type="linear",
):
return LearningRateScheduler(
learning_rate=learning_rate,
warmup=warmup,
schedule=lr_scheduler_type,
n_steps=n_steps,
)
name_to_lomo = { | "lomo": LOMO, | 0 | 2023-11-11 03:29:00+00:00 | 12k |
quantuminterface/qiclib | src/qiclib/code/qi_sequencer.py | [
{
"identifier": "QiCellProperty",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiCellProperty(QiExpression):\n \"\"\"When describing experiments, properties of cells might not yet be defined. Instead a QiCellProperty object will be generated.\n This object can be used as length definition in cQiWait commands and QiPulse\"\"\"\n\n def __init__(self, cell, name):\n super().__init__()\n from .qi_jobs import QiCell\n\n self.name: str = name\n self.cell: QiCell = cell\n self.operations = lambda val: val\n self.opcode = \"x\"\n\n @property\n def opcode_p(self):\n \"\"\"Old opcode in parantheses for building new opcode\"\"\"\n return self.opcode if self.opcode == \"x\" else f\"({self.opcode})\"\n\n def resolve_equal(self, o: object) -> bool:\n if isinstance(o, QiCellProperty):\n return self.name == o.name and self.opcode == o.opcode\n elif o is None:\n return False\n try:\n return o == self()\n except KeyError:\n return False # At time of comparison, unresolved property is not equal to o\n\n def __call__(self):\n value = self.cell._properties.get(self.name)\n\n if isinstance(value, QiCellProperty) or value is None:\n raise KeyError(\"Property could not be resolved\")\n return self.operations(value)\n\n @property\n def value(self):\n if self.type == QiType.TIME:\n return util.conv_time_to_cycles(self())\n elif self.type == QiType.FREQUENCY:\n return util.conv_freq_to_nco_phase_inc(self())\n elif self.type == QiType.NORMAL:\n return self()\n elif self.type == QiType.STATE:\n return self()\n else:\n raise RuntimeError(\n \"Mising type information to resolve value to convert to a machine value.\"\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME, QiType.FREQUENCY)\n return self()\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_cell_property(self)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, QiCellProperty) and self.resolve_equal(other)\n\n def move_add_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations # Necessary because of recursion otherwise\n self.operations = lambda val: old_op(val) + x.value\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_radd_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value + old_op(val)\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_sub_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) - x.value\n self.opcode = f\"{self.opcode_p} - {x}\"\n return self\n\n def move_rsub_op_to_property(self, x: _QiConstValue):\n old_op = self.operations\n self.operations = lambda val: x.value - old_op(val)\n self.opcode = f\"{x} - {self.opcode_p}\"\n return self\n\n def move_mul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) * x.value\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n def move_rmul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value * old_op(val)\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n # These operations are not implemented for general QiExpressions\n # and are, therefore, left as they are.\n\n def __truediv__(self, x):\n if (isinstance(x, 
_QiConstValue) and x._given_value == 1) or x == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) / x\n self.opcode = f\"{self.opcode_p} / {x}\"\n return self\n\n def __rtruediv__(self, x):\n old_op = self.operations\n self.operations = lambda val: x / old_op(val)\n self.opcode = f\"{x} / {self.opcode_p}\"\n return self"
},
{
"identifier": "QiVariableSet",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiVariableSet:\n \"\"\"Class provides Set functionality for QiVariables.\n QiVariables overwrite comparison operations to build operation trees, to still allow comparisons ids are used.\n \"\"\"\n\n def __init__(self) -> None:\n self._var_list: List[\"_QiVariableBase\"] = []\n self._var_id_list: List[int] = []\n\n def __contains__(self, x):\n return x.id in self._var_id_list\n\n def add(self, x: \"_QiVariableBase\"):\n if x.id not in self._var_id_list:\n self._var_id_list.append(x.id)\n self._var_list.append(x)\n\n def update(self, var_set):\n for var in var_set:\n self.add(var)\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self._var_list):\n var = self._var_list[self.n]\n self.n += 1\n return var\n else:\n raise StopIteration\n\n def __len__(self):\n return len(self._var_list)"
},
{
"identifier": "_QiCalcBase",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class _QiCalcBase(QiExpression):\n \"\"\"Represents binary and unary operations.\"\"\"\n\n def __init__(self, val1, op, val2) -> None:\n super().__init__()\n\n self.val1 = val1\n self.op: QiOp = op\n self.val2 = val2\n\n from .qi_types import add_qi_calc_constraints\n\n add_qi_calc_constraints(op, val1, val2, self)\n\n @property\n def contained_variables(self):\n \"\"\"Function traverses the operation tree to determine which QiVariables are used for the calculations.\n Found QiVariables are added to _contained_variables\"\"\"\n if len(self._contained_variables) == 0:\n self._variables_to_container()\n\n return self._contained_variables\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_calc(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return (\n isinstance(other, _QiCalcBase)\n and self.op == other.op\n and self.val1._equal_syntax(other.val1)\n and self.val2._equal_syntax(other.val2)\n )\n\n def __str__(self):\n return (\n \"(\"\n + self.val1.__str__()\n + \" \"\n + self.op.value\n + \" \"\n + self.val2.__str__()\n + \")\"\n )"
},
{
"identifier": "_QiVariableBase",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class _QiVariableBase(QiExpression):\n \"\"\"Base class for QiVariables.\n Variables can be relevant to only a subset of QiCells, this subset is saved in _relevant_cells.\n Variables are simple expressions and, therefore, are typed.\n Variables can be compared by self.id.\"\"\"\n\n id_iter = itertools.count()\n str_id_iter = itertools.count()\n\n def __init__(\n self,\n type: QiType,\n value: Optional[Union[int, float]] = None,\n name=None,\n ):\n from .qi_jobs import QiCell\n\n assert isinstance(type, QiType)\n assert value is None or isinstance(value, (int, float))\n\n super().__init__()\n\n if type != QiType.UNKNOWN:\n self._type_info.set_type(type, _TypeDefiningUse.VARIABLE_DEFINITION)\n\n self.value = value\n\n self._value = value\n self._relevant_cells: Set[QiCell] = set()\n self.id = next(_QiVariableBase.id_iter)\n self.str_id = next(_QiVariableBase.str_id_iter)\n\n self._contained_variables.add(self)\n\n self.name = name\n\n @property\n def contained_variables(self):\n return self._contained_variables\n\n @staticmethod\n def reset_str_id():\n _QiVariableBase.str_id_iter = itertools.count()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_variable(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, _QiVariableBase) and self.id == other.id\n\n def __hash__(self) -> int:\n return self.id\n\n def __str__(self) -> str:\n return f\"QiVariable({self.name or ''})\""
},
{
"identifier": "QiExpression",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiExpression:\n \"\"\"Superclass of every possible qicode expression.\"\"\"\n\n def __init__(self):\n self._contained_variables = QiVariableSet()\n self._type_info = _TypeInformation(self)\n\n @property\n def type(self):\n return self._type_info.type\n\n @staticmethod\n def _from(x):\n \"\"\"Creates an instance of QiExpression of the provided argument if possible.\"\"\"\n if isinstance(x, (float, int)):\n return _QiConstValue(x)\n elif isinstance(x, QiExpression):\n return x\n else:\n raise RuntimeError(f\"Can not create QiExpression from type {type(x)}.\")\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `accept`. This is a bug.\"\n )\n\n @property\n def contained_variables(self):\n \"\"\"Returns the variables used in this expression.\n QiExpression subclasses which contain variables (_QiCalcBase and _QiVariableBase) need to overwrite this.\n \"\"\"\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `contained_variables`. This is a bug.\"\n )\n\n def _variables_to_container(self):\n if isinstance(self, _QiVariableBase):\n self._contained_variables.add(self)\n elif isinstance(self, _QiCalcBase):\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `_equal_syntax`. This is a bug.\"\n )\n\n # QiCellProperties are supposed to support some form of constant folding.\n # However, originally, instead of implementing this in an extra pass over\n # QiJob they were added to the QiCellProperty class.\n # In order to keep support for this limited form of constant folding\n # This logic was placed here.\n\n # (I'm not sure why we don't fold when both operands are QiCellProperty.\n # And I think the reason we don't fold tow _QiConstValue is that originally\n # They were just int/float and would \"fold\" implicitely when using any\n # math operator on them)\n\n # If anyone ever feels the need to improve this situation, I would\n # encourage them to implement a constant folding pass using the existing\n # dataflow infrastructure.\n # This pdf seems to give a nice short introduction into the topic:\n # http://openclassroom.stanford.edu/MainFolder/courses/Compilers/docs/slides/15-02-constant-propagation-annotated.pdf\n\n def __add__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_add_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_radd_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.PLUS, x)\n\n def __radd__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_radd_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_add_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.PLUS, self)\n\n def __sub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_sub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rsub_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MINUS, x)\n\n def __rsub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, 
QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rsub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_sub_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MINUS, self)\n\n def __mul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_mul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rmul_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MULT, x)\n\n def __rmul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rmul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_mul_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MULT, self)\n\n def __lshift__(self, x):\n return _QiCalcBase(self, QiOp.LSH, QiExpression._from(x))\n\n def __rshift__(self, x):\n return _QiCalcBase(self, QiOp.RSH, QiExpression._from(x))\n\n def __and__(self, x):\n return _QiCalcBase(self, QiOp.AND, QiExpression._from(x))\n\n def __rand__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.AND, self)\n\n def __or__(self, x):\n return _QiCalcBase(self, QiOp.OR, QiExpression._from(x))\n\n def __ror__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.OR, self)\n\n def __xor__(self, x):\n return _QiCalcBase(self, QiOp.XOR, QiExpression._from(x))\n\n def __rxor__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.XOR, self)\n\n def __invert__(self):\n return _QiCalcBase(self, QiOp.NOT, None)\n\n def __lt__(self, x):\n return QiCondition(self, QiOpCond.LT, QiExpression._from(x))\n\n def __le__(self, x):\n return QiCondition(self, QiOpCond.LE, QiExpression._from(x))\n\n def __gt__(self, x):\n return QiCondition(self, QiOpCond.GT, QiExpression._from(x))\n\n def __ge__(self, x):\n return QiCondition(self, QiOpCond.GE, QiExpression._from(x))\n\n def __eq__(self, x):\n return QiCondition(self, QiOpCond.EQ, QiExpression._from(x))\n\n def __ne__(self, x):\n return QiCondition(self, QiOpCond.NE, QiExpression._from(x))"
},
{
"identifier": "_QiConstValue",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class _QiConstValue(QiExpression):\n \"\"\"Represents QiExpression which are a constant (compiletime known) values.\n Integers can be used as either NORMAL, TIME or FREQUENCY values. It is up to the type inference to figure it out.\n If the value can be represented as a float value it has an additional attribute float_value which represents the value before\n it has been converted to the integer representation used by the sequencer.\n \"\"\"\n\n def __init__(self, value: Union[int, float]):\n super().__init__()\n\n self._given_value = value # Value given to the constructor. Is interpreted differently depending on the type.\n\n # Constant STATE values can only be 0 or 1, therefore we forbid QiType.STATE if we have a different value.\n if isinstance(self._given_value, float) or self._given_value not in [1, 0]:\n self._type_info.add_illegal_type(\n QiType.STATE, _IllegalTypeReason.INVALID_STATE_CONSTANT\n )\n\n if isinstance(self._given_value, float):\n self._type_info.add_illegal_type(\n QiType.NORMAL, _IllegalTypeReason.INVALID_NORMAL_CONSTANT\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME or self.type, QiType.FREQUENCY)\n return self._given_value\n\n @property\n def value(self):\n \"\"\"\n Integer representation of the constant value.\n Since the sequencer doesn't have a floating point unit, any calculations has to be using integers.\n In practice, this means we only perform fixpoint arithmetic and need to convert any float like value\n to such an fixpoint value.\n The correct conversion depends on the type.\n \"\"\"\n if self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n return self._given_value\n elif self.type == QiType.TIME:\n return int(util.conv_time_to_cycles(self._given_value, \"ceil\"))\n else:\n assert self.type == QiType.FREQUENCY\n return util.conv_freq_to_nco_phase_inc(self._given_value)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_constant(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n assert QiType.UNKNOWN not in (self.type, other.type)\n return isinstance(other, _QiConstValue) and self.value == other.value\n\n def __str__(self):\n if self.type in (QiType.TIME, QiType.FREQUENCY):\n value = self.float_value\n elif self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n value = self.value\n else:\n raise RuntimeError(\n \"This program point should be unreacheable. Please file a bug report.\"\n )\n return f\"{value:g}\""
},
{
"identifier": "QiCondition",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiCondition:\n \"\"\"Saves conditional comparisons.\n Can only be root node\"\"\"\n\n def __init__(\n self,\n val1: QiExpression,\n op: QiOpCond = QiOpCond.GT,\n val2: QiExpression = _QiConstValue(0),\n ) -> None:\n self._contained_variables = QiVariableSet()\n\n self.val1 = val1\n self.op = op\n self.val2 = val2\n\n from .qi_types import add_qi_condition_constraints\n\n add_qi_condition_constraints(op, val1, val2)\n\n @property\n def contained_variables(self):\n if len(self._contained_variables) == 0:\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n return self._contained_variables\n\n def accept(self, visitor):\n visitor.visit_condition(self)\n\n def __str__(self) -> str:\n return f\"{self.val1} {self.op.value} {self.val2}\""
},
{
"identifier": "QiOpCond",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiOpCond(Enum):\n LT = \"<\"\n LE = \"<=\"\n GT = \">\"\n GE = \">=\"\n EQ = \"==\"\n NE = \"!=\"\n\n @staticmethod\n def invert(condition):\n inverted = {\n QiOpCond.EQ: QiOpCond.NE,\n QiOpCond.NE: QiOpCond.EQ,\n QiOpCond.LT: QiOpCond.GE,\n QiOpCond.LE: QiOpCond.GT,\n QiOpCond.GT: QiOpCond.LE,\n QiOpCond.GE: QiOpCond.LT,\n }\n inv = inverted.get(condition)\n if inv is None:\n raise RuntimeError(\"Condition not found: \" + str(condition))\n return inv"
},
{
"identifier": "QiOp",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiOp(Enum):\n PLUS = \"+\"\n MINUS = \"-\"\n MULT = \"*\"\n LSH = \"<<\"\n RSH = \">>\"\n AND = \"&\"\n OR = \"|\"\n XOR = \"^\"\n NOT = \"~\""
},
{
"identifier": "SeqLoad",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqLoad(SeqITypeInst):\n def __init__(\n self,\n dst: int,\n base: int,\n offset: int = 0,\n ):\n \"\"\"Load Sequencer instruction.\n\n :param dst: The register address which will contain the loaded value.\n :param base: The register address which contains the source address.\n :param offset: Constant offset added to the source address. Defaults to 0.\n :param width: Number of bits to be loaded. Defaults to 32.\n :param signed: Is the loaded value signed. Depending on this flag the loaded value is sign extended.\n \"\"\"\n\n assert SequencerInstruction.is_value_in_lower_immediate(\n offset\n ), \"Invalid offset ({offset}) to load instruction.\"\n\n # The hardware currently only supports 32 bit memory accesses.\n super().__init__(\n SeqOpCode.LOAD,\n SeqMemFunct3.get_from_width(32, False),\n dst,\n base,\n offset,\n )\n\n @property\n def base_reg(self):\n return self.register"
},
{
"identifier": "SeqStore",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqStore(SeqSTypeInst):\n \"\"\"Store Sequencer instruction.\n\n :param src: The register address which contains the value to be stored.\n :param base: The register address which contains the destination address.\n :param offset: Constant offset added to the destination address. Defaults to 0.\n :param width: Number of bits to be stored. Defaults to 32.\n \"\"\"\n\n def __init__(\n self,\n src: int,\n base: int,\n offset: int = 0,\n ):\n assert SequencerInstruction.is_value_in_lower_immediate(\n offset\n ), \"Invalid offset ({offset}) to store instruction.\"\n\n # The hardware currently only supports 32 bit memory accesses.\n super().__init__(\n SeqOpCode.STORE, SeqMemFunct3.get_from_width(32, False), base, src, offset\n )\n\n @property\n def base_reg(self):\n return self.reg1\n\n @property\n def src_reg(self):\n return self.reg2"
},
{
"identifier": "SeqAwaitQubitState",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqAwaitQubitState(SeqITypeInst):\n def __init__(\n self,\n cell: int = 0,\n dst: int = 0,\n ) -> None:\n super().__init__(\n SeqOpCode.SYNCH, SeqExtSynchFunct3.QUBIT_STATE, dst, 0, cell, 0\n )"
},
{
"identifier": "SequencerInstruction",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SequencerInstruction:\n OPCODE_WIDTH = 7\n FUNCT3_WIDTH = 3\n FUNCT7_WIDTH = 7\n REGISTER_WIDTH = 5\n LOWER_IMMEDIATE_WIDTH = 12\n UPPER_IMMEDIATE_WIDTH = 20\n\n LOWER_IMM_MAX = (\n 2 ** (LOWER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Lower immediate 12 Bits - 1Bit Signed\n LOWER_IMM_MIN = -(2 ** (LOWER_IMMEDIATE_WIDTH - 1))\n\n UPPER_IMM_MAX = (\n 2 ** (UPPER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Upper immediate 20 Bits - 1Bit Signed\n UPPER_IMM_MIN = -(2 ** (UPPER_IMMEDIATE_WIDTH - 1))\n UPPER_IMM_MAX_UNSIGNED = 2**UPPER_IMMEDIATE_WIDTH\n\n imm_type = Union[int] # might include float in the future\n\n def __init__(self, OpCode: SeqOpCode) -> None:\n self.op = OpCode\n\n @staticmethod\n def is_value_in_lower_immediate(val: imm_type) -> bool:\n return (\n SequencerInstruction.LOWER_IMM_MIN\n <= val\n <= SequencerInstruction.LOWER_IMM_MAX\n )\n\n @staticmethod\n def is_value_in_unsigned_upper_immediate(val: imm_type) -> bool:\n return SequencerInstruction.UPPER_IMM_MAX_UNSIGNED >= abs(val)\n\n @abstractmethod\n def get_riscv_instruction(self) -> int:\n pass"
},
{
"identifier": "SeqRegImmediateInst",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqRegImmediateInst(SeqITypeInst):\n def __init__(\n self,\n operator: QiOp,\n dst_reg: int = 0,\n register: int = 0,\n immediate: SequencerInstruction.imm_type = 0,\n ) -> None:\n funct3 = super().QiOpToFunct3(operator)\n funct7 = super().QiOpToFunct7(operator)\n super().__init__(\n SeqOpCode.REG_IMM, funct3, dst_reg, register, immediate, funct7\n )"
},
{
"identifier": "SeqRegRegInst",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqRegRegInst(SeqRTypeInst):\n def __init__(\n self, operator: QiOp, dst_reg: int = 0, reg_1: int = 0, reg_2: int = 0\n ) -> None:\n funct3 = super().QiOpToFunct3(operator)\n funct7 = super().QiOpToFunct7(operator)\n super().__init__(\n SeqOpCode.REGISTER_REGISTER, funct3, funct7, dst_reg, reg_1, reg_2\n )"
},
{
"identifier": "SeqLoadUpperImm",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqLoadUpperImm(SeqUTypeInst):\n def __init__(\n self, dst_reg: int = 0, immediate: SequencerInstruction.imm_type = 0\n ) -> None:\n super().__init__(SeqOpCode.LOAD_UPPER_IMM, dst_reg, immediate)"
},
{
"identifier": "SeqJump",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqJump(SequencerInstruction):\n \"\"\"Does not represent actual J-Type instruction, RISC-V only supports address sizes as multiples of 2\"\"\"\n\n def __init__(self, rel_jump: int = 0) -> None:\n super().__init__(SeqOpCode.JUMP)\n self.jump_val = rel_jump\n\n def get_riscv_instruction(self) -> int:\n instruction = 0\n instruction |= self.op.value\n instruction |= (\n (self.jump_val & 0x7F800) >> 11\n ) << SequencerInstruction.OPCODE_WIDTH + SequencerInstruction.REGISTER_WIDTH\n instruction |= (\n ((self.jump_val & 0x400) >> 10)\n << SequencerInstruction.OPCODE_WIDTH\n + SequencerInstruction.REGISTER_WIDTH\n + 8\n )\n instruction |= (\n (self.jump_val & 0x3FF)\n << SequencerInstruction.OPCODE_WIDTH\n + SequencerInstruction.REGISTER_WIDTH\n + 9\n )\n instruction |= (\n ((self.jump_val & 0x80000) >> 19)\n << SequencerInstruction.OPCODE_WIDTH\n + SequencerInstruction.REGISTER_WIDTH\n + 19\n )\n\n return instruction\n\n def __str__(self) -> str:\n return f\"Op: {self.op.name}, immediate: {hex(self.jump_val)}\\n\""
},
{
"identifier": "SeqBranch",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqBranch(SeqBTypeInst):\n def __init__(self, operator, reg1: int, reg2: int, rel_jump: int = 0) -> None:\n op_reg1_reg2 = super().get_register_operation_tuple(operator, reg1, reg2)\n super().__init__(\n SeqOpCode.BRANCH,\n op_reg1_reg2[0],\n op_reg1_reg2[1],\n op_reg1_reg2[2],\n rel_jump,\n )\n\n def set_jump_value(self, jump_val: int):\n self.immediate = jump_val"
},
{
"identifier": "SeqWaitImm",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqWaitImm(SeqUTypeInst):\n def __init__(self, duration: int = 0) -> None:\n super().__init__(\n OpCode=SeqOpCode.WAIT_IMM, immediate=((duration & 0xFFFFF) << 12)\n )\n\n @property\n def immediate(self):\n return self._immediate >> 12\n\n def __str__(self):\n return f\"Op: {self.op.name}, dst: {str(self.dst_reg)}, immediate: {hex(self.immediate & 0x000FFFFF)}\\n\""
},
{
"identifier": "SeqWaitRegister",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqWaitRegister(SeqUTypeInst):\n def __init__(self, reg: int) -> None:\n super().__init__(OpCode=SeqOpCode.WAIT_REG, dst_reg=reg)"
},
{
"identifier": "SeqTrigger",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqTrigger(SeqUTypeInst):\n def __init__(\n self,\n module0: int = 0,\n module1: int = 0,\n module2: int = 0,\n module3: int = 0,\n module4: int = 0,\n sync=False,\n reset=False,\n ) -> None:\n self._trig_indices = [module0, module1, module2, module3, module4]\n\n immediate = 0\n immediate |= (reset & 0x1) << 12\n immediate |= (sync & 0x1) << 14\n immediate |= (module0 & 0xF) << 16\n immediate |= (module1 & 0xF) << 20\n immediate |= (module2 & 0xF) << 22\n immediate |= (module3 & 0xF) << 26\n immediate |= (module4 & 0xF) << 30\n super().__init__(OpCode=SeqOpCode.TRIGGER, immediate=immediate)\n\n def __str__(self) -> str:\n return (\n f\"Op: {self.op.name}, mod0: {hex(self._trig_indices[0])}, mod1: {hex(self._trig_indices[1])}\"\n f\", mod2: {hex(self._trig_indices[2])}, mod3: {hex(self._trig_indices[3])}, mod4: {hex(self._trig_indices[4])}\\n\"\n )"
},
{
"identifier": "SeqEnd",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqEnd(SeqSTypeInst):\n def __init__(self) -> None:\n super().__init__(SeqOpCode.SYNCH, SeqExtSynchFunct3.START, 0, 0, 0)"
},
{
"identifier": "SeqTriggerWaitRegister",
"path": "src/qiclib/code/qi_seq_instructions.py",
"snippet": "class SeqTriggerWaitRegister(SeqUTypeInst):\n def __init__(self, reg: int) -> None:\n super().__init__(OpCode=SeqOpCode.TRIG_WAIT_REG, dst_reg=reg)"
},
{
"identifier": "_get_for_range_iterations",
"path": "src/qiclib/code/qi_util.py",
"snippet": "def _get_for_range_iterations(start, end, step):\n \"\"\"Returns number of iterations of ForRange or None if start or end are QiVariables.\n Stupid but no need to check validity of input, in case of unrolled loop\"\"\"\n from .qi_var_definitions import _QiVariableBase, _QiConstValue, QiCellProperty\n\n if (\n isinstance(start, _QiVariableBase)\n or start is None\n or isinstance(end, _QiVariableBase)\n or end is None\n ):\n return None\n\n if isinstance(start, (_QiConstValue, QiCellProperty)):\n start = start.value\n if isinstance(end, (_QiConstValue, QiCellProperty)):\n end = end.value\n if isinstance(step, (_QiConstValue, QiCellProperty)):\n step = step.value\n\n iterations = 0\n for _ in range(start, end, step):\n iterations += 1\n return iterations"
}
] | from enum import Enum
from typing import List, Union, Any, Dict, Optional, Tuple
from qiclib.code.qi_jobs import (
ForRange,
If,
Parallel,
cQiRecording,
cQiSync,
)
from .qi_var_definitions import (
QiCellProperty,
QiVariableSet,
_QiCalcBase,
_QiVariableBase,
QiExpression,
_QiConstValue,
QiCondition,
QiOpCond,
QiOp,
)
from .qi_seq_instructions import (
SeqLoad,
SeqStore,
SeqAwaitQubitState,
SequencerInstruction,
SeqRegImmediateInst,
SeqRegRegInst,
SeqLoadUpperImm,
SeqJump,
SeqBranch,
SeqWaitImm,
SeqWaitRegister,
SeqTrigger,
SeqEnd,
SeqTriggerWaitRegister,
)
from .qi_util import _get_for_range_iterations
from .qi_var_definitions import _QiVariableBase
from .qi_var_definitions import _QiCalcBase
from .qi_var_definitions import _QiVariableBase
from .qi_jobs import _cQiPlay_base
import warnings
import qiclib.packages.utility as util | 9,554 | """Class of Sequencer representing registers.
Keeps track of the value held in the register. Values are used to determine program length; program length is invalidated by use of If/Else.
TODO load commands invalidate value"""
def __init__(self, address) -> None:
self.adr = address
self.value = None
self.valid = True
def addition(self, val1, val2):
self.value = val1 + val2
def subtraction(self, val1, val2):
self.value = val1 - val2
def multiplication(self, val1, val2):
self.value = val1 * val2
def and_values(self, val1, val2):
self.value = val1 & val2
def or_values(self, val1, val2):
self.value = val1 | val2
def xor_values(self, val1, val2):
self.value = val1 ^ val2
def lshift(self, val1, val2):
self.value = val1 << val2
def rshift(self, val1, val2):
self.value = val1 >> val2
def inversion(self, val1, val2):
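# val2 is unused here: QiOp.NOT is unary, but the shared dispatch below always passes two operands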
self.value = ~val1
# Dictionary used to receive function from input QiOp
eval_operation = {
QiOp.PLUS: addition,
QiOp.MINUS: subtraction,
QiOp.MULT: multiplication,
QiOp.AND: and_values,
QiOp.OR: or_values,
QiOp.XOR: xor_values,
QiOp.LSH: lshift,
QiOp.RSH: rshift,
QiOp.NOT: inversion,
}
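# The dict holds the plain functions defined above (not bound methods), so callers must pass the
# instance explicitly: self.eval_operation[op](self, val1, val2)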
def get_value(self):
if self.valid:
return self.value
return None
def update_register_value(self, val1, op, val2):
"""Register Values are updated to allow implicit synchronisations through wait when variable Wait/Pulse is used.
When a calculation is done using a invalid variable value, the ensuing value is also invalidated.
"""
if self.adr == 0:
self.value = 0 # reg0 always contains 0
return
if isinstance(val1, _Register):
if val1.value is None:
raise RuntimeError(
f"Variable at Register {val1.adr} has not been properly initialised"
)
if not val1.valid:
self.valid = False
val1 = val1.value
if isinstance(val2, _Register):
if val2.value is None:
raise RuntimeError(
f"Variable at Register {val2.adr} has not been properly initialised"
)
if not val2.valid:
self.valid = False
val2 = val2.value
self.eval_operation[op](self, val1, val2)
class ForRangeEntry:
def __init__(self, reg_addr, start_val, end_val, step_val) -> None:
self.reg_addr = reg_addr
self.start = start_val
self.end = end_val
self.step = step_val
self.end_addr = 0
self.iterations = 0
self.aggregate_iterations = 0
self.contained_entries: List[ForRangeEntry] = []
def _calc_aggregate(self):
"""Calculates the number of loops contained inside, considering nested entries, for later use at progress bar."""
self.iterations = _get_for_range_iterations(self.start, self.end, self.step)
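# _get_for_range_iterations returns None when start or end is a runtime QiVariable,
# i.e. the iteration count cannot be determined at compile time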
if len(self.contained_entries) == 0 or self.iterations is None:
if self.iterations is None:
self.aggregate_iterations = 0
warnings.warn(
"A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
)
else:
self.aggregate_iterations = self.iterations
else:
nested = 0
for entry in self.contained_entries:
if entry.aggregate_iterations == 0:
warnings.warn(
"A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
)
continue
nested += entry.aggregate_iterations
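# aggregate = own iteration count * total iterations of the nested loops (falls back to *1 if none could be counted)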
self.aggregate_iterations = self.iterations * (nested if nested != 0 else 1)
def get_iteration(self, value: int) -> int:
"""Returns the current iteration depending on the parameter value"""
| # Copyright © 2017-2023 Quantum Interface ([email protected])
# Richard Gebauer, IPE, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
The lower level logic of the code generation.
This module tracks the sequencer state at the current point (e.g. register values, variable to register mapping, etc.),
provides helper functions to generate code for expressions and more.
"""
class _Register:
"""Class of Sequencer representing registers.
Keeps track of the value held in the register. Values are used to determine program length; program length is invalidated by use of If/Else.
TODO load commands invalidate value"""
def __init__(self, address) -> None:
self.adr = address
self.value = None
self.valid = True
def addition(self, val1, val2):
self.value = val1 + val2
def subtraction(self, val1, val2):
self.value = val1 - val2
def multiplication(self, val1, val2):
self.value = val1 * val2
def and_values(self, val1, val2):
self.value = val1 & val2
def or_values(self, val1, val2):
self.value = val1 | val2
def xor_values(self, val1, val2):
self.value = val1 ^ val2
def lshift(self, val1, val2):
self.value = val1 << val2
def rshift(self, val1, val2):
self.value = val1 >> val2
def inversion(self, val1, val2):
self.value = ~val1
# Dictionary used to receive function from input QiOp
eval_operation = {
QiOp.PLUS: addition,
QiOp.MINUS: subtraction,
QiOp.MULT: multiplication,
QiOp.AND: and_values,
QiOp.OR: or_values,
QiOp.XOR: xor_values,
QiOp.LSH: lshift,
QiOp.RSH: rshift,
QiOp.NOT: inversion,
}
def get_value(self):
if self.valid:
return self.value
return None
def update_register_value(self, val1, op, val2):
"""Register Values are updated to allow implicit synchronisations through wait when variable Wait/Pulse is used.
When a calculation is done using a invalid variable value, the ensuing value is also invalidated.
"""
if self.adr == 0:
self.value = 0 # reg0 always contains 0
return
if isinstance(val1, _Register):
if val1.value is None:
raise RuntimeError(
f"Variable at Register {val1.adr} has not been properly initialised"
)
if not val1.valid:
self.valid = False
val1 = val1.value
if isinstance(val2, _Register):
if val2.value is None:
raise RuntimeError(
f"Variable at Register {val2.adr} has not been properly initialised"
)
if not val2.valid:
self.valid = False
val2 = val2.value
self.eval_operation[op](self, val1, val2)
class ForRangeEntry:
def __init__(self, reg_addr, start_val, end_val, step_val) -> None:
self.reg_addr = reg_addr
self.start = start_val
self.end = end_val
self.step = step_val
self.end_addr = 0
self.iterations = 0
self.aggregate_iterations = 0
self.contained_entries: List[ForRangeEntry] = []
def _calc_aggregate(self):
"""Calculates the number of loops contained inside, considering nested entries, for later use at progress bar."""
self.iterations = _get_for_range_iterations(self.start, self.end, self.step)
if len(self.contained_entries) == 0 or self.iterations is None:
if self.iterations is None:
self.aggregate_iterations = 0
warnings.warn(
"A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
)
else:
self.aggregate_iterations = self.iterations
else:
nested = 0
for entry in self.contained_entries:
if entry.aggregate_iterations == 0:
warnings.warn(
"A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
)
continue
nested += entry.aggregate_iterations
self.aggregate_iterations = self.iterations * (nested if nested != 0 else 1)
def get_iteration(self, value: int) -> int:
"""Returns the current iteration depending on the parameter value""" | if isinstance(self.start, _QiVariableBase): | 3 | 2023-11-10 10:26:10+00:00 | 12k |
jpcadena/fastapi-boilerplate | app/api/api_v1/router/user.py | [
{
"identifier": "get_redis_dep",
"path": "app/api/deps.py",
"snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependency injection on Redis\n :type redis_dependency: RedisDependency\n :return: The Redis connection instance as a generator\n :rtype: AsyncGenerator[Redis, None]\n \"\"\"\n async with redis_dependency as redis:\n yield redis"
},
{
"identifier": "get_current_user",
"path": "app/api/oauth2_validation.py",
"snippet": "async def get_current_user(\n token: Annotated[str, Depends(oauth2_scheme)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n user_service: Annotated[UserService, Depends(get_user_service)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserAuth:\n \"\"\"\n Fetches the current authenticated user based on the provided JWT\n access token\n :param token: The Access token from OAuth2PasswordBearer\n :type token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param user_service: Dependency method for User service object\n :type user_service: UserService\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: Authenticated user information\n :rtype: UserAuth\n \"\"\"\n token_service: TokenService = TokenService(redis, auth_settings)\n is_blacklisted: bool = await token_service.is_token_blacklisted(token)\n if is_blacklisted:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Token is blacklisted\",\n )\n return await authenticate_user(token, auth_settings, user_service, redis)"
},
{
"identifier": "get_auth_settings",
"path": "app/config/config.py",
"snippet": "def get_init_settings() -> InitSettings:\ndef get_settings() -> Settings:\ndef get_sql_settings() -> SQLDatabaseSettings:\ndef get_auth_settings() -> AuthSettings:"
},
{
"identifier": "AuthSettings",
"path": "app/config/db/auth_settings.py",
"snippet": "class AuthSettings(BaseSettings):\n \"\"\"\n Settings class for authentication using JWT and Redis\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n MAX_REQUESTS: PositiveInt = 30\n RATE_LIMIT_DURATION: PositiveInt = 60\n BLACKLIST_EXPIRATION_SECONDS: PositiveInt = 3600\n API_V1_STR: str = \"/api/v1\"\n ALGORITHM: str = \"HS256\"\n AUTH_URL: str = \"api/v1/auth/\"\n TOKEN_URL: str = \"api/v1/auth/login\"\n OAUTH2_SCHEME: str = \"JWT\"\n OAUTH2_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate most of\" \" the API endpoints.\"\n )\n OAUTH2_REFRESH_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate\" \" most ofhe API endpoints.\"\n )\n TOKEN_USER_INFO_REGEX: str = (\n r\"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-\"\n r\"[0-9a-f]{4}-[0-9a-f]{12}:\\d{1,3}\\.\"\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\"\n )\n SUB_REGEX: str = (\n r\"^username:[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-\"\n r\"[89ab][0-9a-f]{3}-[0-9a-f]{12}$\"\n )\n HEADERS: dict[str, str] = {\"WWW-Authenticate\": \"Bearer\"}\n DETAIL: str = \"Could not validate credentials\"\n NO_CLIENT_FOUND: str = \"No client found on the request\"\n SECRET_KEY: str\n SERVER_URL: AnyHttpUrl\n SERVER_DESCRIPTION: str\n CACHE_SECONDS: PositiveInt = 3600\n ACCESS_TOKEN_EXPIRE_MINUTES: float\n REFRESH_TOKEN_EXPIRE_MINUTES: PositiveInt\n EMAIL_RESET_TOKEN_EXPIRE_HOURS: PositiveInt\n AUDIENCE: Optional[AnyHttpUrl] = None\n STRICT_TRANSPORT_SECURITY_MAX_AGE: PositiveInt\n\n @field_validator(\"AUDIENCE\", mode=\"before\")\n def assemble_audience(\n cls, v: Optional[str], info: ValidationInfo\n ) -> AnyHttpUrl:\n \"\"\"\n Combine server host and API_V1_STR to create the audience\n string.\n :param v: The value of audience attribute\n :type v: Optional[str]\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The AUDIENCE attribute\n :rtype: AnyHttpUrl\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return AnyHttpUrl(\n f'{str(info.data.get(\"SERVER_URL\"))[:-1]}:8000/'\n f'{info.data.get(\"TOKEN_URL\")}'\n )\n\n REDIS_SCHEME: str\n REDIS_HOST: str\n REDIS_USERNAME: str\n REDIS_PASSWORD: str\n REDIS_PORT: PositiveInt\n REDIS_DATABASE_URI: Optional[RedisDsn] = None\n\n @field_validator(\"REDIS_DATABASE_URI\", mode=\"before\")\n def assemble_redis_connection(\n cls, v: Optional[str], info: ValidationInfo\n ) -> RedisDsn:\n \"\"\"\n Assemble the cache database connection as URI string\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: Redis URI\n :rtype: RedisDsn\n \"\"\"\n # pylint: disable=no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return RedisDsn(\n str(\n Url.build(\n scheme=info.data.get(\"REDIS_SCHEME\", \"\"),\n username=info.data.get(\"REDIS_USERNAME\"),\n password=info.data.get(\"REDIS_PASSWORD\"),\n host=info.data.get(\"REDIS_HOST\", \"\"),\n port=info.data.get(\"REDIS_PORT\"),\n )\n )\n )"
},
{
"identifier": "InitSettings",
"path": "app/config/init_settings.py",
"snippet": "class InitSettings(BaseSettings):\n \"\"\"\n Init Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n case_sensitive=True,\n extra=\"allow\",\n )\n\n ITERATIONS: PositiveInt = 100000\n KEY_BYTES_LENGTH: PositiveInt = 32\n SALT_BYTES: PositiveInt = 16\n IV_BYTES: PositiveInt = 12\n PUBLIC_EXPONENT: PositiveInt = 65537\n RSA_KEY_BITS: PositiveInt = 2048\n SALUTE: str = \"Salute!\"\n ROOT_MSG: str = \"Hello, World!\"\n SERVER_NAME: str = \"FastAPI Boilerplate\"\n PROJECT_NAME: str = \"fastapi-boilerplate\"\n VERSION: str = \"1.0\"\n ENCODING: str = \"UTF-8\"\n DEFAULT_REGION: str = \"Guayas\"\n DEFAULT_COUNTRY: str = \"Ecuador\"\n OPENAPI_FILE_PATH: str = \"/openapi.json\"\n DATE_FORMAT: str = \"%Y-%m-%d\"\n DATETIME_FORMAT: str = \"%Y-%m-%d %H:%M:%S\"\n FILE_DATE_FORMAT: str = \"%d-%b-%Y-%H-%M-%S\"\n IMAGES_APP: str = \"images\"\n IMAGES_PATH: str = \"/assets/images\"\n IMAGES_DIRECTORY: str = \"assets/images\"\n EMAIL_TEMPLATES_DIR: str = \"templates\"\n PASSWORD_RECOVERY_SUBJECT: str = \"Password recovery for user\"\n NEW_ACCOUNT_SUBJECT: str = \"New account for user\"\n WELCOME_SUBJECT: str = \"Welcome to \"\n PASSWORD_CHANGED_CONFIRMATION_SUBJECT: str = (\n \"Successfully password \" \"changed for \"\n )\n DELETE_ACCOUNT_SUBJECT: str = \"Account deleted for \"\n LOG_FORMAT: str = (\n \"[%(name)s][%(asctime)s][%(levelname)s][%(module)s]\"\n \"[%(funcName)s][%(lineno)d]: %(message)s\"\n )\n PASSWORD_REGEX: str = (\n \"^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?\" \"[#?!@$%^&*-]).{8,14}$\"\n )\n\n SUMMARY: str = \"\"\"This backend project is FastAPI template.\n This project serves as the backend, which aims to provide a robust and\n reliable system to its users.\n This backend application plays a crucial role in providing the\n functionality for user authentication, real-time monitoring,\n data processing, and advanced alerting system. It is designed to ensure\n the scalability and maintainability of the mobile app,\n making it a vital part of the overall solution.\n \"\"\"\n DESCRIPTION: str = f\"\"\"**FastAPI**, **SQLAlchemy** and **Redis** helps you\n do awesome stuff. 
🚀\n \\n\\n<img src=\"data:image/png;base64,{img_b64}\"/>\"\"\"\n LICENSE_INFO: dict[str, str] = {\n \"name\": \"MIT\",\n \"identifier\": \"MIT\",\n }\n TAGS_METADATA: list[dict[str, str]] = [\n {\n \"name\": \"user\",\n \"description\": f\"\"\"Operations with users, such as register, get,\n update and delete.\\n\\n<img src=\"data:image/png;base64,\n {users_b64}\" width=\"150\" height=\"100\"/>\"\"\",\n },\n {\n \"name\": \"auth\",\n \"description\": f\"\"\"The authentication logic is here as well as\n password recovery and reset.\n \\n\\n<img src=\"data:image/png;base64,{auth_b64}\" width=\"75\"\n height=\"75\"/>\"\"\",\n },\n ]\n USER_CREATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user create object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone number `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"5939876a4321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n \"region\": \"Andes\",\n \"country\": \"New York\",\n \"postal_code\": \"999999\",\n },\n },\n },\n }\n USER_UPDATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user update object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone numbers `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n 
\"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"59398x54321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n },\n },\n },\n }\n EMAIL_BODY_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** email object that works correctly.\",\n \"value\": \"[email protected]\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": 123,\n },\n }\n TOKEN_PAYLOAD_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** token payload object that works \"\n \"correctly.\",\n \"value\": {\n \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"token\": \"123\",\n \"password\": \"abc123\",\n },\n },\n }\n AUTHORIZATION_HEADER_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** authorization token object that works \"\n \"correctly.\",\n \"value\": jwt.encode(\n claims=jsonable_encoder(\n {\n \"sub\": f\"username:{str(uuid4())}\",\n \"nationalities\": [\"ECU\"],\n \"email\": \"[email protected]\",\n \"nickname\": \"example\",\n \"preferred_username\": \"example\",\n \"given_name\": \"Some\",\n \"family_name\": \"Example\",\n \"middle_name\": \"One\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"updated_at\": datetime.now(),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n \"exp\": int(time.time()) + 1800,\n \"nbf\": int(time.time()) - 1,\n \"iat\": int(time.time()),\n }\n ),\n key=\"f52e826e62cdd364c86f129cb18db2fe2be93859c5104cac9585f\"\n \"305378dce65\",\n algorithm=\"HS256\",\n ),\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": \"123\",\n },\n }\n LIMIT_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** limit query parameter that works \"\n \"correctly.\",\n \"value\": 1,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert limit `strings` to actual\"\n \" `numbers` automatically\",\n \"value\": \"5\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }\n SKIP_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** skip query parameter that works \"\n \"correctly.\",\n \"value\": 0,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert skip `strings` to actual\"\n \" 
`numbers` automatically\",\n \"value\": \"20\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }"
},
{
"identifier": "Settings",
"path": "app/config/settings.py",
"snippet": "class Settings(BaseSettings):\n \"\"\"\n Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n SMTP_PORT: PositiveInt\n SMTP_HOST: str\n SMTP_USER: str\n SMTP_PASSWORD: str\n MAIL_SUBJECT: str\n MAIL_TIMEOUT: float\n EMAILS_FROM_EMAIL: Optional[EmailStr] = None\n EMAILS_FROM_NAME: Optional[str] = None\n SUPERUSER_EMAIL: EmailStr\n SUPERUSER_FIRST_NAME: str\n SUPERUSER_PASSWORD: str\n SUPERUSER_LAST_NAME: str\n SUPERUSER_STREET_ADDRESS: str\n SUPERUSER_LOCALITY: str\n SUPERUSER_POSTAL_CODE: str\n BACKEND_CORS_ORIGINS: list[AnyHttpUrl] = []\n\n PUBLIC_KEY_PATH: FilePath\n PRIVATE_KEY_PATH: FilePath\n\n @field_validator(\n \"PUBLIC_KEY_PATH\", \"PRIVATE_KEY_PATH\", mode=\"before\", check_fields=True\n )\n def validate_key_paths(cls, key_path: FilePath) -> FilePath:\n \"\"\"\n Validate the provided key path.\n :param key_path: Provided key path\n :type key_path: FilePath\n :return: The validated key path\n :rtype: FilePath\n \"\"\"\n if not str(key_path).endswith(\".pem\"):\n raise ValueError(f\"{key_path} must have a .pem extension\")\n base_name: str = os.path.basename(key_path)\n if not base_name.endswith(\"key.pem\"):\n raise ValueError(\n f\"{key_path} must have a file name ending with 'key'\"\n )\n return key_path\n\n @field_validator(\"BACKEND_CORS_ORIGINS\", mode=\"before\")\n def assemble_cors_origins(\n cls, v: Union[str, list[str]]\n ) -> Union[list[str], str]:\n \"\"\"\n Assemble a list of allowed CORS origins.\n :param v: Provided CORS origins, either a string or a list of\n strings\n :type v: Union[str, list[str]]\n :return: List of Backend CORS origins to be accepted\n :rtype: Union[list[str], str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if isinstance(v, str) and not v.startswith(\"[\"):\n return [i.strip() for i in v.split(\",\")]\n if isinstance(v, list):\n return v\n raise ValueError(v)\n\n CONTACT_NAME: Optional[str] = None\n CONTACT_URL: Optional[AnyHttpUrl] = None\n CONTACT_EMAIL: Optional[EmailStr] = None\n CONTACT: Optional[dict[str, Any]] = None\n\n @field_validator(\"CONTACT\", mode=\"before\")\n def assemble_contact(\n cls, v: Optional[str], info: ValidationInfo\n ) -> dict[str, str]:\n \"\"\"\n Assemble contact information\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The contact attribute\n :rtype: dict[str, str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n contact: dict[str, Any] = {}\n if info.data.get(\"CONTACT_NAME\"):\n contact[\"name\"] = info.data.get(\"CONTACT_NAME\")\n if info.data.get(\"CONTACT_URL\"):\n contact[\"url\"] = info.data.get(\"CONTACT_URL\")\n if info.data.get(\"CONTACT_EMAIL\"):\n contact[\"email\"] = info.data.get(\"CONTACT_EMAIL\")\n return contact"
},
{
"identifier": "NotFoundException",
"path": "app/exceptions/exceptions.py",
"snippet": "class NotFoundException(Exception):\n \"\"\"\n Not Found Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)"
},
{
"identifier": "ServiceException",
"path": "app/exceptions/exceptions.py",
"snippet": "class ServiceException(Exception):\n \"\"\"\n Service Layer Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)"
},
{
"identifier": "UserCreate",
"path": "app/schemas/external/user.py",
"snippet": "class UserCreate(UserBase, UserOptional):\n \"\"\"\n Schema for creating a User record.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_create_example,\n )\n\n password: str = Field(\n ...,\n title=\"Password\",\n description=\"Password of the User\",\n min_length=8,\n max_length=14,\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password to be validated\n :type v: Optional[str]\n :return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)"
},
{
"identifier": "UserCreateResponse",
"path": "app/schemas/external/user.py",
"snippet": "class UserCreateResponse(UserID, UserBase):\n \"\"\"\n Schema for the response when creating a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_create_response_example,\n )"
},
{
"identifier": "UserResponse",
"path": "app/schemas/external/user.py",
"snippet": "class UserResponse(UserID, UserBase, UserOptional, UserInDB):\n \"\"\"\n Schema for the response when retrieving a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_response_example,\n )"
},
{
"identifier": "UsersResponse",
"path": "app/schemas/external/user.py",
"snippet": "class UsersResponse(BaseModel):\n \"\"\"\n Class representation for a list of users response\n \"\"\"\n\n users: list[UserResponse]"
},
{
"identifier": "UserUpdate",
"path": "app/schemas/external/user.py",
"snippet": "class UserUpdate(BaseModel):\n \"\"\"\n Schema for updating a User record.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_example,\n )\n\n username: Optional[str] = Field(\n default=None,\n title=\"Username\",\n description=\"Username to identify the user\",\n min_length=4,\n max_length=15,\n )\n email: Optional[EmailStr] = Field(\n default=None,\n title=\"Email\",\n description=\"Preferred e-mail address of the User\",\n )\n first_name: Optional[str] = Field(\n default=None,\n title=\"First name\",\n description=\"First name(s) of the User\",\n min_length=1,\n max_length=50,\n )\n middle_name: Optional[str] = Field(\n default=None,\n title=\"Middle Name\",\n description=\"Middle name(s) of the User\",\n max_length=50,\n )\n last_name: Optional[str] = Field(\n default=None,\n title=\"Last name\",\n description=\"Last name(s) of the User\",\n min_length=1,\n max_length=100,\n )\n password: Optional[str] = Field(\n default=None,\n title=\"New Password\",\n description=\"New Password of the User\",\n min_length=8,\n max_length=14,\n )\n gender: Optional[Gender] = Field(\n default=None, title=\"Gender\", description=\"Gender of the User\"\n )\n birthdate: Optional[date] = Field(\n default=None, title=\"Birthdate\", description=\"Birthday of the User\"\n )\n phone_number: Optional[PhoneNumber] = Field(\n default=None,\n title=\"Phone number\",\n description=\"Preferred telephone number of the User\",\n )\n address: Optional[Address] = Field(\n default=None, title=\"Address\", description=\"Address of the User\"\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password value to validate\n :type v: Optional[str]\n :return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)"
},
{
"identifier": "UserUpdateResponse",
"path": "app/schemas/external/user.py",
"snippet": "class UserUpdateResponse(\n UserAuth, UserName, UserPassword, UserOptional, UserInDB\n):\n \"\"\"\n Schema for the response when updating a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_response_example,\n )"
},
{
"identifier": "UserAuth",
"path": "app/schemas/infrastructure/user.py",
"snippet": "class UserAuth(UserID, UserBaseAuth):\n \"\"\"\n User Auth that inherits from UserID.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_auth_example,\n )"
},
{
"identifier": "CachedUserService",
"path": "app/services/infrastructure/cached_user.py",
"snippet": "class CachedUserService:\n \"\"\"\n Service class for cached user-related business logic.\n \"\"\"\n\n def __init__(\n self,\n redis: Redis, # type: ignore\n ):\n self._redis: Redis = redis # type: ignore\n self._cache_seconds: PositiveInt = auth_setting.CACHE_SECONDS\n\n async def get_model_from_cache(self, key: UUID4) -> Optional[User]:\n \"\"\"\n Get the user model instance for the given key from the cache database\n :param key: The unique identifier for the model user instance\n :type key: UUID4\n :return: The user model instance\n :rtype: User\n \"\"\"\n value: Optional[str] = await self._redis.get(str(key))\n if not value:\n return None\n user_data: dict[str, Any] = json.loads(value)\n if address_data := user_data.pop(\"address\", None):\n address_instance: Address = Address(**address_data)\n address_create: AddressDB = AddressDB(\n **address_instance.model_dump()\n )\n user_instance: User = User(address=address_create, **user_data)\n return user_instance\n return None\n\n async def get_schema_from_cache(self, key: UUID4) -> Optional[UserResponse]:\n \"\"\"\n Get the user auth schema instance for the given key from the cache\n database\n :param key: The unique identifier for the user instance\n :type key: UUID4\n :return: The user schema instance\n :rtype: UserResponse\n \"\"\"\n value: Optional[str] = await self._redis.get(str(key))\n if value:\n user_data: dict[str, Any] = json.loads(value)\n if len(user_data.keys()) > 3:\n return UserResponse(**user_data)\n return None\n\n async def set_to_cache(\n self,\n key: UUID4,\n value: dict[str, Any],\n ) -> None:\n \"\"\"\n Set the user schema instance to the cache database using the given key\n :param key: The unique identifier for the user instance\n :type key: UUID4\n :param value: The user schema instance to be used\n :type value: dict[str, Any]\n :return: None\n :rtype: NoneType\n \"\"\"\n await self._redis.setex(\n str(key), self._cache_seconds, json.dumps(custom_serializer(value))\n )"
},
{
"identifier": "UserService",
"path": "app/services/infrastructure/user.py",
"snippet": "class UserService:\n \"\"\"\n Service class for user-related business logic.\n \"\"\"\n\n def __init__(\n self,\n user_repo: UserRepository,\n redis: Redis, # type: ignore\n ):\n self._user_repo: UserRepository = user_repo\n self._redis: Redis = redis # type: ignore\n self._cache_seconds: PositiveInt = auth_setting.CACHE_SECONDS\n\n async def get_user_by_id(self, user_id: UUID4) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique identifier\n :param user_id: The unique identifier of the user\n :type user_id: UUID4\n :return: User information\n :rtype: Optional[UserResponse]\n \"\"\"\n user: Optional[User]\n try:\n user = await self._user_repo.read_by_id(IdSpecification(user_id))\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n detail: str = f\"User with id {user_id} not found in the system.\"\n logger.error(detail)\n raise NotFoundException(detail)\n user_response: Optional[\n UserResponse\n ] = await model_to_response( # type: ignore\n user, UserResponse\n )\n return user_response\n\n async def get_login_user(self, username: str) -> User:\n \"\"\"\n Retrieve user information for login purposes by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: User\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_username(\n UsernameSpecification(username)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with username: {username}\")\n return user\n\n async def get_user_by_username(\n self, username: str\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: UserResponse\n \"\"\"\n try:\n user: User = await self.get_login_user(username)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(user, UserResponse) # type: ignore\n\n async def get_user_by_email(\n self, email: EmailStr\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique email.\n :param email: The email to retrieve User from\n :type email: EmailStr\n :return: User found in database\n :rtype: Optional[UserResponse]\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with email: {email}\")\n return await model_to_response(user, UserResponse) # type: ignore\n\n async def get_user_id_by_email(self, email: EmailStr) -> UUID4:\n \"\"\"\n Read the user ID from the database with unique email.\n :param email: Email to retrieve User from\n :type email: EmailStr\n :return: User ID found in database\n :rtype: UUID4\n \"\"\"\n try:\n user_id: Optional[UUID4] = await self._user_repo.read_id_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user_id:\n raise ServiceException(f\"User ID not found with email: {email}\")\n return user_id\n\n async def register_user(\n self, user: Union[UserCreate, 
UserSuperCreate]\n ) -> Optional[UserCreateResponse]:\n \"\"\"\n Register a new user in the database\n :param user: Request object representing the user\n :type user: Union[UserCreate, UserSuperCreate]\n :return: Response object representing the created user in the\n database\n :rtype: UserCreateResponse\n \"\"\"\n try:\n created_user = await self._user_repo.create_user(user)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(\n created_user, UserCreateResponse # type: ignore\n )\n\n async def get_users(\n self, offset: Optional[NonNegativeInt], limit: Optional[PositiveInt]\n ) -> list[UserResponse]:\n \"\"\"\n Retrieve users' information from the table\n :param offset: Offset from where to start returning users\n :type offset: NonNegativeInt\n :param limit: Limit the number of results from query\n :type limit: PositiveInt\n :return: User information\n :rtype: list[UserResponse]\n \"\"\"\n try:\n users: list[User] = await self._user_repo.read_users(offset, limit)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n found_users: list[UserResponse] = [\n await model_to_response(user, UserResponse) # type: ignore\n for user in users\n ]\n return found_users\n\n async def update_user(\n self, user_id: UUID4, user: UserUpdate\n ) -> Optional[UserUpdateResponse]:\n \"\"\"\n Update user information from table\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :param user: Requested user information to update\n :type user: UserUpdate\n :return: User information\n :rtype: Optional[UserUpdateResponse]\n \"\"\"\n try:\n updated_user: Optional[User] = await self._user_repo.update_user(\n IdSpecification(user_id), user\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not updated_user:\n raise ServiceException(\n f\"User with user_id: {user_id} could not be updated\"\n )\n return await model_to_response(\n updated_user, UserUpdateResponse # type: ignore\n )\n\n async def delete_user(self, user_id: UUID4) -> dict[str, Any]:\n \"\"\"\n Deletes a user by its id\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :return: Data to confirmation info about the delete process\n :rtype: dict[str, Any]\n \"\"\"\n deleted: bool\n deleted_at: Optional[datetime]\n try:\n deleted = await self._user_repo.delete_user(\n IdSpecification(user_id)\n )\n deleted_at = datetime.now()\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n deleted = False\n deleted_at = None\n finally:\n return {\"ok\": deleted, \"deleted_at\": deleted_at}"
},
{
"identifier": "get_user_service",
"path": "app/services/infrastructure/user.py",
"snippet": "async def get_user_service(\n user_repo: Annotated[UserRepository, Depends(get_user_repository)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserService:\n \"\"\"\n Get an instance of the user service with the given repository.\n :param user_repo: User repository object for database connection\n :type user_repo: UserRepository\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: UserService instance with repository associated\n :rtype: UserService\n \"\"\"\n return UserService(user_repo, redis)"
},
{
"identifier": "send_new_account_email",
"path": "app/tasks/email_tasks/email_tasks.py",
"snippet": "@with_logging\nasync def send_new_account_email(\n email_to: EmailStr,\n username: str,\n settings: Annotated[Settings, Depends(get_settings)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n) -> None:\n \"\"\"\n Send a new account email\n :param email_to: The email address of the recipient with new\n account\n :type email_to: EmailStr\n :param username: Username of the recipient\n :type username: str\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :return: None\n :rtype: NoneType\n \"\"\"\n subject: str = (\n f\"{init_settings.PROJECT_NAME} - \"\n f\"{init_settings.NEW_ACCOUNT_SUBJECT} {username}\"\n )\n template_str: str = await build_email_template(\n \"new_account.html\", init_settings\n )\n await send_email(\n email_to=email_to,\n subject_template=subject,\n html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"link\": f\"{auth_settings.SERVER_URL}\",\n },\n settings=settings,\n )"
},
{
"identifier": "send_welcome_email",
"path": "app/tasks/email_tasks/email_tasks.py",
"snippet": "@with_logging\nasync def send_welcome_email(\n email_to: EmailStr,\n username: str,\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n settings: Annotated[Settings, Depends(get_settings)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> None:\n \"\"\"\n Send a welcome email\n :param email_to: The email address of the recipient to welcome\n :type email_to: EmailStr\n :param username: Username of the recipient\n :type username: str\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: None\n :rtype: NoneType\n \"\"\"\n subject: str = (\n f\"{init_settings.WELCOME_SUBJECT}{init_settings.PROJECT_NAME},\"\n f\" {username}\"\n )\n template_str: str = await build_email_template(\n \"welcome.html\", init_settings\n )\n await send_email(\n email_to=email_to,\n subject_template=subject,\n html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"link\": f\"{auth_settings.SERVER_URL}\",\n },\n settings=settings,\n )"
}
] | import logging
from typing import Annotated, Any, Optional
from uuid import uuid4
from fastapi import (
APIRouter,
BackgroundTasks,
Body,
Depends,
HTTPException,
Response,
status,
)
from fastapi.params import Path, Query
from pydantic import UUID4, NonNegativeInt, PositiveInt
from redis.asyncio import Redis
from sqlalchemy.exc import SQLAlchemyError
from app.api.deps import get_redis_dep
from app.api.oauth2_validation import get_current_user
from app.config.config import (
get_auth_settings,
get_init_settings,
get_settings,
init_setting,
)
from app.config.db.auth_settings import AuthSettings
from app.config.init_settings import InitSettings
from app.config.settings import Settings
from app.exceptions.exceptions import NotFoundException, ServiceException
from app.schemas.external.user import (
UserCreate,
UserCreateResponse,
UserResponse,
UsersResponse,
UserUpdate,
UserUpdateResponse,
)
from app.schemas.infrastructure.user import UserAuth
from app.services.infrastructure.cached_user import CachedUserService
from app.services.infrastructure.user import UserService, get_user_service
from app.tasks.email_tasks.email_tasks import (
send_new_account_email,
send_welcome_email,
) | 10,334 | """
User API Router
This module provides CRUD (Create, Retrieve, Update, Delete) operations
for users.
"""
logger: logging.Logger = logging.getLogger(__name__)
router: APIRouter = APIRouter(prefix="/user", tags=["user"])
| """
User API Router
This module provides CRUD (Create, Retrieve, Update, Delete) operations
for users.
"""
logger: logging.Logger = logging.getLogger(__name__)
router: APIRouter = APIRouter(prefix="/user", tags=["user"])
| @router.get("", response_model=UsersResponse) | 11 | 2023-11-17 00:32:32+00:00 | 12k |
dataaug/open-interpreter-free | interpreter/core/core.py | [
{
"identifier": "cli",
"path": "interpreter/cli/cli.py",
"snippet": "def cli(interpreter):\n parser = argparse.ArgumentParser(description=\"Open Interpreter\")\n\n # Add arguments\n for arg in arguments:\n if arg[\"type\"] == bool:\n parser.add_argument(\n f'-{arg[\"nickname\"]}',\n f'--{arg[\"name\"]}',\n dest=arg[\"name\"],\n help=arg[\"help_text\"],\n action=\"store_true\",\n default=None,\n )\n else:\n choices = arg[\"choices\"] if \"choices\" in arg else None\n default = arg[\"default\"] if \"default\" in arg else None\n\n parser.add_argument(\n f'-{arg[\"nickname\"]}',\n f'--{arg[\"name\"]}',\n dest=arg[\"name\"],\n help=arg[\"help_text\"],\n type=arg[\"type\"],\n choices=choices,\n default=default,\n )\n\n # Add special arguments\n parser.add_argument(\n \"--config\",\n dest=\"config\",\n action=\"store_true\",\n help=\"open config.yaml file in text editor\",\n )\n parser.add_argument(\n \"--conversations\",\n dest=\"conversations\",\n action=\"store_true\",\n help=\"list conversations to resume\",\n )\n parser.add_argument(\n \"-f\",\n \"--fast\",\n dest=\"fast\",\n action=\"store_true\",\n help=\"run `interpreter --model gpt-3.5-turbo`\",\n )\n parser.add_argument(\n \"--version\",\n dest=\"version\",\n action=\"store_true\",\n help=\"get Open Interpreter's version number\",\n )\n\n args = parser.parse_args()\n\n # This should be pushed into an open_config.py util\n # If --config is used, open the config.yaml file in the Open Interpreter folder of the user's config dir\n if args.config:\n if args.config_file:\n config_file = get_config_path(args.config_file)\n else:\n config_file = get_config_path()\n\n print(f\"Opening `{config_file}`...\")\n\n # Use the default system editor to open the file\n if platform.system() == \"Windows\":\n os.startfile(\n config_file\n ) # This will open the file with the default application, e.g., Notepad\n else:\n try:\n # Try using xdg-open on non-Windows platforms\n subprocess.call([\"xdg-open\", config_file])\n except FileNotFoundError:\n # Fallback to using 'open' on macOS if 'xdg-open' is not available\n subprocess.call([\"open\", config_file])\n return\n\n if args.local:\n # Default local (LM studio) attributes\n interpreter.system_message = \"You are an AI.\"\n interpreter.model = (\n \"openai/\" + interpreter.model\n ) # This tells LiteLLM it's an OpenAI compatible server\n interpreter.api_base = \"http://localhost:1234/v1\"\n interpreter.max_tokens = 1000\n interpreter.context_window = 3000\n interpreter.api_key = \"0\"\n\n display_markdown_message(\n \"\"\"\n> Open Interpreter's local mode is powered by **`LM Studio`**.\n\n\nYou will need to run **LM Studio** in the background.\n\n1. Download **LM Studio** from [https://lmstudio.ai/](https://lmstudio.ai/) then start it.\n2. Select a language model then click **Download**.\n3. Click the **<->** button on the left (below the chat button).\n4. 
Select your model at the top, then click **Start Server**.\n\n\nOnce the server is running, you can begin your conversation below.\n\n> **Warning:** This feature is highly experimental.\n> Don't expect `gpt-3.5` / `gpt-4` level quality, speed, or reliability yet!\n\n\"\"\"\n )\n\n # Set attributes on interpreter\n for attr_name, attr_value in vars(args).items():\n # Ignore things that aren't possible attributes on interpreter\n if attr_value is not None and hasattr(interpreter, attr_name):\n # If the user has provided a config file, load it and extend interpreter's configuration\n if attr_name == \"config_file\":\n user_config = get_config_path(attr_value)\n interpreter.config_file = user_config\n interpreter.extend_config(config_path=user_config)\n else:\n setattr(interpreter, attr_name, attr_value)\n\n # if safe_mode and auto_run are enabled, safe_mode disables auto_run\n if interpreter.auto_run and (\n interpreter.safe_mode == \"ask\" or interpreter.safe_mode == \"auto\"\n ):\n setattr(interpreter, \"auto_run\", False)\n\n # If --conversations is used, run conversation_navigator\n if args.conversations:\n conversation_navigator(interpreter)\n return\n\n if args.version:\n version = pkg_resources.get_distribution(\"open-interpreter\").version\n print(f\"Open Interpreter {version}\")\n return\n\n if args.fast:\n interpreter.model = \"gpt-3.5-turbo\"\n\n if args.vision:\n interpreter.vision = True\n interpreter.model = \"gpt-4-vision-preview\"\n interpreter.system_message += \"\\nThe user will show you an image of the code you write. You can view images directly. Be sure to actually write a markdown code block for almost every user request! Almost EVERY message should include a markdown code block. Do not end your message prematurely!\\n\\nFor HTML: This will be run STATELESSLY. You may NEVER write '<!-- previous code here... --!>' or `<!-- header will go here -->` or anything like that. It is CRITICAL TO NEVER WRITE PLACEHOLDERS. Placeholders will BREAK it. You must write the FULL HTML CODE EVERY TIME. Therefore you cannot write HTML piecemeal—write all the HTML, CSS, and possibly Javascript **in one step, in one code block**. The user will help you review it visually.\\nIf the user submits a filepath, you will also see the image. The filepath and user image will both be in the user's message.\"\n interpreter.function_calling_llm = False\n interpreter.context_window = 110000\n interpreter.max_tokens = 4096\n\n display_markdown_message(\"> `Vision` enabled **(experimental)**\\n\")\n\n interpreter.chat()"
},
{
"identifier": "setup_llm",
"path": "interpreter/llm/setup_llm.py",
"snippet": "def setup_llm(interpreter):\n \"\"\"\n Takes an Interpreter (which includes a ton of LLM settings),\n returns a Coding LLM (a generator that streams deltas with `message` and `code`).\n \"\"\"\n # gpt4fre\n gpt4free = True\n if gpt4free:\n text_llm = setup_gpt4free_llm(interpreter)\n coding_llm = convert_to_coding_gpt4free_llm(text_llm, debug_mode=interpreter.debug_mode)\n return coding_llm\n\n # Detect whether or not it's a function calling LLM\n if interpreter.function_calling_llm == None:\n if not interpreter.local and (\n interpreter.model in litellm.open_ai_chat_completion_models\n or interpreter.model.startswith(\"azure/\")\n ):\n interpreter.function_calling_llm = True\n else:\n interpreter.function_calling_llm = False\n\n if interpreter.function_calling_llm:\n # Function-calling LLM\n coding_llm = setup_openai_coding_llm(interpreter)\n else:\n # If disable_procedures has not been set manually:\n if interpreter.disable_procedures == None:\n # Disable procedures, which confuses most of these models (except GPT-4V)\n\n if interpreter.model != \"gpt-4-vision-preview\":\n interpreter.disable_procedures = True\n\n # Non-function-calling LLM\n text_llm = setup_text_llm(interpreter)\n coding_llm = convert_to_coding_llm(text_llm, debug_mode=interpreter.debug_mode)\n\n return coding_llm"
},
{
"identifier": "terminal_interface",
"path": "interpreter/terminal_interface/terminal_interface.py",
"snippet": "def terminal_interface(interpreter, message):\n # Auto run and local don't display messages.\n # Probably worth abstracting this to something like \"verbose_cli\" at some point.\n if not interpreter.auto_run and not interpreter.local:\n interpreter_intro_message = [\n \"**Open Interpreter** will require approval before running code.\"\n ]\n\n if interpreter.safe_mode == \"ask\" or interpreter.safe_mode == \"auto\":\n if not check_for_package(\"semgrep\"):\n interpreter_intro_message.append(\n f\"**Safe Mode**: {interpreter.safe_mode}\\n\\n>Note: **Safe Mode** requires `semgrep` (`pip install semgrep`)\"\n )\n else:\n interpreter_intro_message.append(\"Use `interpreter -y` to bypass this.\")\n\n interpreter_intro_message.append(\"Press `CTRL-C` to exit.\")\n\n display_markdown_message(\"\\n\\n\".join(interpreter_intro_message) + \"\\n\")\n\n active_block = None\n\n if message:\n interactive = False\n else:\n interactive = True\n\n while True:\n try:\n if interactive:\n message = input(\"> \").strip()\n\n try:\n # This lets users hit the up arrow key for past messages\n readline.add_history(message)\n except:\n # If the user doesn't have readline (may be the case on windows), that's fine\n pass\n\n except KeyboardInterrupt:\n # Exit gracefully\n break\n\n if message.startswith(\"%\") and interactive:\n handle_magic_command(interpreter, message)\n continue\n\n # Many users do this\n if message.strip() == \"interpreter --local\":\n print(\"Please press CTRL-C then run `interpreter --local`.\")\n continue\n\n if True: ################## interpreter.vision:\n # Is the input a path to an image? Like they just dragged it into the terminal?\n image_path = find_image_path(message)\n\n ## If we found an image, add it to the message\n if image_path:\n if interpreter.debug_mode:\n print(\"Found image:\", image_path)\n # Turn it into base64\n with open(image_path, \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\n file_extension = image_path.split(\".\")[-1]\n message = {\n \"role\": \"user\",\n \"message\": message,\n \"image\": f\"data:image/{file_extension};base64,{encoded_string}\",\n }\n\n # Track if we've ran a code block.\n # We'll use this to determine if we should render a new code block,\n # In the event we get code -> output -> code again\n ran_code_block = False\n render_cursor = True\n\n try:\n for chunk in interpreter.chat(message, display=False, stream=True):\n if interpreter.debug_mode:\n print(\"Chunk in `terminal_interface`:\", chunk)\n\n # Message\n if \"message\" in chunk:\n if active_block is None:\n active_block = MessageBlock()\n if active_block.type != \"message\":\n active_block.end()\n active_block = MessageBlock()\n active_block.message += chunk[\"message\"]\n render_cursor = True\n\n # Code\n if \"code\" in chunk or \"language\" in chunk:\n if active_block is None:\n active_block = CodeBlock()\n if active_block.type != \"code\" or ran_code_block:\n # If the last block wasn't a code block,\n # or it was, but we already ran it:\n active_block.end()\n active_block = CodeBlock()\n ran_code_block = False\n render_cursor = True\n\n if \"language\" in chunk:\n active_block.language = chunk[\"language\"]\n if \"code\" in chunk:\n active_block.code += chunk[\"code\"]\n if \"active_line\" in chunk:\n active_block.active_line = chunk[\"active_line\"]\n\n # Execution notice\n if \"executing\" in chunk:\n if not interpreter.auto_run:\n # OI is about to execute code. 
The user wants to approve this\n\n # End the active block so you can run input() below it\n active_block.end()\n\n should_scan_code = False\n\n if not interpreter.safe_mode == \"off\":\n if interpreter.safe_mode == \"auto\":\n should_scan_code = True\n elif interpreter.safe_mode == \"ask\":\n response = input(\n \" Would you like to scan this code? (y/n)\\n\\n \"\n )\n print(\"\") # <- Aesthetic choice\n\n if response.strip().lower() == \"y\":\n should_scan_code = True\n\n if should_scan_code:\n # Get code language and actual code from the chunk\n # We need to give these to semgrep when we start our scan\n language = chunk[\"executing\"][\"language\"]\n code = chunk[\"executing\"][\"code\"]\n\n scan_code(code, language, interpreter)\n\n response = input(\n \" Would you like to run this code? (y/n)\\n\\n \"\n )\n print(\"\") # <- Aesthetic choice\n\n if response.strip().lower() == \"y\":\n # Create a new, identical block where the code will actually be run\n # Conveniently, the chunk includes everything we need to do this:\n active_block = CodeBlock()\n active_block.margin_top = False # <- Aesthetic choice\n active_block.language = chunk[\"executing\"][\"language\"]\n active_block.code = chunk[\"executing\"][\"code\"]\n else:\n # User declined to run code.\n interpreter.messages.append(\n {\n \"role\": \"user\",\n \"message\": \"I have declined to run this code.\",\n }\n )\n break\n\n if \"image\" in chunk or \"html\" in chunk or \"javascript\" in chunk:\n # Good to keep the LLM informed <3\n message_for_llm = display_output(chunk)\n if message_for_llm:\n if \"output\" in interpreter.messages[-1]:\n interpreter.messages[-1][\"output\"] += \"\\n\" + message_for_llm\n else:\n interpreter.messages[-1][\"output\"] = message_for_llm\n\n # I know this is insane, but the easiest way to now display this\n # is to set the chunk to an output chunk, which will trigger the next conditional!\n\n chunk = {\"output\": message_for_llm}\n\n # Output\n if \"output\" in chunk:\n ran_code_block = True\n render_cursor = False\n active_block.output += \"\\n\" + chunk[\"output\"]\n active_block.output = (\n active_block.output.strip()\n ) # <- Aesthetic choice\n\n # Truncate output\n active_block.output = truncate_output(\n active_block.output, interpreter.max_output\n )\n\n if active_block:\n active_block.refresh(cursor=render_cursor)\n\n yield chunk\n\n # (Sometimes -- like if they CTRL-C quickly -- active_block is still None here)\n if active_block:\n active_block.end()\n active_block = None\n\n if not interactive:\n # Don't loop\n break\n\n except KeyboardInterrupt:\n # Exit gracefully\n if active_block:\n active_block.end()\n active_block = None\n\n if interactive:\n # (this cancels LLM, returns to the interactive \"> \" input)\n continue\n else:\n break\n except:\n system_info(interpreter)\n raise"
},
{
"identifier": "validate_llm_settings",
"path": "interpreter/terminal_interface/validate_llm_settings.py",
"snippet": "def validate_llm_settings(interpreter):\n \"\"\"\n Interactivley prompt the user for required LLM settings\n \"\"\"\n\n # This runs in a while loop so `continue` lets us start from the top\n # after changing settings (like switching to/from local)\n while True:\n if interpreter.local:\n # We have already displayed a message.\n # (This strange behavior makes me think validate_llm_settings needs to be rethought / refactored)\n break\n\n else:\n # Ensure API keys are set as environment variables\n\n # OpenAI\n if interpreter.model in litellm.open_ai_chat_completion_models:\n if not os.environ.get(\"OPENAI_API_KEY\") and not interpreter.api_key:\n display_welcome_message_once()\n\n display_markdown_message(\n \"\"\"---\n > OpenAI API key not found\n\n To use `GPT-4` (highly recommended) please provide an OpenAI API key.\n\n To use another language model, consult the documentation at [docs.openinterpreter.com](https://docs.openinterpreter.com/language-model-setup/).\n \n ---\n \"\"\"\n )\n\n response = getpass.getpass(\"OpenAI API key: \")\n print(f\"OpenAI API key: {response[:4]}...{response[-4:]}\")\n\n display_markdown_message(\n \"\"\"\n\n **Tip:** To save this key for later, run `export OPENAI_API_KEY=your_api_key` on Mac/Linux or `setx OPENAI_API_KEY your_api_key` on Windows.\n \n ---\"\"\"\n )\n\n interpreter.api_key = response\n time.sleep(2)\n break\n\n # This is a model we don't have checks for yet.\n break\n\n # If we're here, we passed all the checks.\n\n # Auto-run is for fast, light useage -- no messages.\n # If local, we've already displayed a message.\n if not interpreter.auto_run and not interpreter.local:\n display_markdown_message(f\"> Model set to `{interpreter.model}`\")\n return"
},
{
"identifier": "check_for_update",
"path": "interpreter/utils/check_for_update.py",
"snippet": "def check_for_update():\n # Fetch the latest version from the PyPI API\n response = requests.get(f\"https://pypi.org/pypi/open-interpreter/json\")\n latest_version = response.json()[\"info\"][\"version\"]\n\n # Get the current version using pkg_resources\n current_version = pkg_resources.get_distribution(\"open-interpreter\").version\n\n return version.parse(latest_version) > version.parse(current_version)"
},
{
"identifier": "display_markdown_message",
"path": "interpreter/utils/display_markdown_message.py",
"snippet": "def display_markdown_message(message):\n \"\"\"\n Display markdown message. Works with multiline strings with lots of indentation.\n Will automatically make single line > tags beautiful.\n \"\"\"\n\n for line in message.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n print(\"\")\n elif line == \"---\":\n rich_print(Rule(style=\"white\"))\n else:\n rich_print(Markdown(line))\n\n if \"\\n\" not in message and message.startswith(\">\"):\n # Aesthetic choice. For these tags, they need a space below them\n print(\"\")"
},
{
"identifier": "get_config",
"path": "interpreter/utils/get_config.py",
"snippet": "def get_config_path(path=user_config_path):\ndef get_config(path=user_config_path):"
},
{
"identifier": "get_storage_path",
"path": "interpreter/utils/local_storage_path.py",
"snippet": "def get_storage_path(subdirectory=None):\n if subdirectory is None:\n return config_dir\n else:\n return os.path.join(config_dir, subdirectory)"
},
{
"identifier": "generate_system_message",
"path": "interpreter/core/generate_system_message.py",
"snippet": "def generate_system_message(interpreter):\n \"\"\"\n Dynamically generate a system message.\n\n Takes an interpreter instance,\n returns a string.\n\n This is easy to replace!\n Just swap out `interpreter.generate_system_message` with another function.\n \"\"\"\n\n #### Start with the static system message\n\n system_message = interpreter.system_message\n\n #### Add dynamic components, like the user's OS, username, relevant procedures, etc\n\n system_message += \"\\n\" + get_user_info_string()\n\n if not interpreter.local and not interpreter.disable_procedures:\n try:\n system_message += \"\\n\" + get_relevant_procedures_string(\n interpreter.messages\n )\n except:\n if interpreter.debug_mode:\n print(traceback.format_exc())\n # It's okay if they can't. This just fixes some common mistakes it makes.\n\n return system_message"
},
{
"identifier": "respond",
"path": "interpreter/core/respond.py",
"snippet": "def respond(interpreter):\n \"\"\"\n Yields tokens, but also adds them to interpreter.messages. TBH probably would be good to seperate those two responsibilities someday soon\n Responds until it decides not to run any more code or say anything else.\n \"\"\"\n\n last_unsupported_code = \"\"\n\n while True:\n system_message = interpreter.generate_system_message()\n\n # Create message object\n system_message = {\"role\": \"system\", \"message\": system_message}\n\n # Create the version of messages that we'll send to the LLM\n messages_for_llm = interpreter.messages.copy()\n messages_for_llm = [system_message] + messages_for_llm\n\n # It's best to explicitly tell these LLMs when they don't get an output\n for message in messages_for_llm:\n if \"output\" in message and message[\"output\"] == \"\":\n message[\"output\"] = \"No output\"\n\n ### RUN THE LLM ###\n\n # Add a new message from the assistant to interpreter's \"messages\" attribute\n # (This doesn't go to the LLM. We fill this up w/ the LLM's response)\n interpreter.messages.append({\"role\": \"assistant\"})\n\n # Start putting chunks into the new message\n # + yielding chunks to the user\n try:\n # Track the type of chunk that the coding LLM is emitting\n chunk_type = None\n\n for chunk in interpreter._llm(messages_for_llm):\n # Add chunk to the last message\n interpreter.messages[-1] = merge_deltas(interpreter.messages[-1], chunk)\n\n # This is a coding llm\n # It will yield dict with either a message, language, or code (or language AND code)\n\n # We also want to track which it's sending to we can send useful flags.\n # (otherwise pretty much everyone needs to implement this)\n for new_chunk_type in [\"message\", \"language\", \"code\"]:\n if new_chunk_type in chunk and chunk_type != new_chunk_type:\n if chunk_type:\n yield {f\"end_of_{chunk_type}\": True}\n # Language is actually from a code block\n if new_chunk_type == \"language\":\n new_chunk_type = \"code\"\n chunk_type = new_chunk_type\n yield {f\"start_of_{chunk_type}\": True}\n\n yield chunk\n\n # We don't trigger the end_of_message or end_of_code flag if we actually end on either (we just exit the loop above)\n yield {f\"end_of_{chunk_type}\": True}\n\n except litellm.exceptions.BudgetExceededError:\n display_markdown_message(\n f\"\"\"> Max budget exceeded\n\n **Session spend:** ${litellm._current_cost}\n **Max budget:** ${interpreter.max_budget}\n\n Press CTRL-C then run `interpreter --max_budget [higher USD amount]` to proceed.\n \"\"\"\n )\n break\n # Provide extra information on how to change API keys, if we encounter that error\n # (Many people writing GitHub issues were struggling with this)\n except Exception as e:\n if (\n interpreter.local == False\n and \"auth\" in str(e).lower()\n or \"api key\" in str(e).lower()\n ):\n output = traceback.format_exc()\n raise Exception(\n f\"{output}\\n\\nThere might be an issue with your API key(s).\\n\\nTo reset your API key (we'll use OPENAI_API_KEY for this example, but you may need to reset your ANTHROPIC_API_KEY, HUGGINGFACE_API_KEY, etc):\\n Mac/Linux: 'export OPENAI_API_KEY=your-key-here',\\n Windows: 'setx OPENAI_API_KEY your-key-here' then restart terminal.\\n\\n\"\n )\n elif interpreter.local:\n raise Exception(\n str(e)\n + \"\"\"\n\nPlease make sure LM Studio's local server is running by following the steps above.\n\nIf LM Studio's local server is running, please try a language model with a different architecture.\n\n \"\"\"\n )\n else:\n raise\n\n ### RUN CODE (if it's there) ###\n\n if \"code\" in 
interpreter.messages[-1]:\n if interpreter.debug_mode:\n print(\"Running code:\", interpreter.messages[-1])\n\n try:\n # What code do you want to run?\n code = interpreter.messages[-1][\"code\"]\n\n # Fix a common error where the LLM thinks it's in a Jupyter notebook\n if interpreter.messages[-1][\"language\"] == \"python\" and code.startswith(\n \"!\"\n ):\n code = code[1:]\n interpreter.messages[-1][\"code\"] = code\n interpreter.messages[-1][\"language\"] = \"shell\"\n\n # Get a code interpreter to run it\n language = interpreter.messages[-1][\"language\"].lower().strip()\n if language in language_map:\n if language not in interpreter._code_interpreters:\n # Create code interpreter\n config = {\"language\": language, \"vision\": interpreter.vision}\n interpreter._code_interpreters[\n language\n ] = create_code_interpreter(config)\n code_interpreter = interpreter._code_interpreters[language]\n else:\n # This still prints the code but don't allow code to run. Let's Open-Interpreter know through output message\n\n output = (\n f\"Open Interpreter does not currently support `{language}`.\"\n )\n\n yield {\"output\": output}\n interpreter.messages[-1][\"output\"] = output\n\n # Let the response continue so it can deal with the unsupported code in another way. Also prevent looping on the same piece of code.\n if code != last_unsupported_code:\n last_unsupported_code = code\n continue\n else:\n break\n\n # Yield a message, such that the user can stop code execution if they want to\n try:\n yield {\"executing\": {\"code\": code, \"language\": language}}\n except GeneratorExit:\n # The user might exit here.\n # We need to tell python what we (the generator) should do if they exit\n break\n\n # Yield each line, also append it to last messages' output\n interpreter.messages[-1][\"output\"] = \"\"\n for line in code_interpreter.run(code):\n yield line\n if \"output\" in line:\n output = interpreter.messages[-1][\"output\"]\n output += \"\\n\" + line[\"output\"]\n\n # Truncate output\n output = truncate_output(output, interpreter.max_output)\n\n interpreter.messages[-1][\"output\"] = output.strip()\n # Vision\n if interpreter.vision:\n base64_image = None\n if \"image\" in line:\n base64_image = line[\"image\"]\n if \"html\" in line:\n base64_image = html_to_base64(line[\"html\"])\n\n if base64_image:\n yield {\"output\": \"Sending image output to GPT-4V...\"}\n interpreter.messages[-1][\n \"image\"\n ] = f\"data:image/jpeg;base64,{base64_image}\"\n\n except:\n output = traceback.format_exc()\n yield {\"output\": output.strip()}\n interpreter.messages[-1][\"output\"] = output.strip()\n\n yield {\"active_line\": None}\n yield {\"end_of_execution\": True}\n\n else:\n # Doesn't want to run code. We're done\n break\n\n return"
}
] | import json
import os
from datetime import datetime
from ..cli.cli import cli
from ..llm.setup_llm import setup_llm
from ..terminal_interface.terminal_interface import terminal_interface
from ..terminal_interface.validate_llm_settings import validate_llm_settings
from ..utils.check_for_update import check_for_update
from ..utils.display_markdown_message import display_markdown_message
from ..utils.get_config import get_config, user_config_path
from ..utils.local_storage_path import get_storage_path
from .generate_system_message import generate_system_message
from .respond import respond | 7,741 | self.conversation_filename = None
self.conversation_history_path = get_storage_path("conversations")
# LLM settings
self.model = ""
self.temperature = None
self.system_message = ""
self.context_window = None
self.max_tokens = None
self.api_base = None
self.api_key = None
self.max_budget = None
self._llm = None
self.function_calling_llm = None
self.vision = False # LLM supports vision
# Load config defaults
self.extend_config(self.config_file)
# Check for update
try:
if not self.local:
# This should actually be pushed into the utility
if check_for_update():
display_markdown_message(
"> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
)
except:
# Doesn't matter
pass
def extend_config(self, config_path):
if self.debug_mode:
print(f"Extending configuration from `{config_path}`")
config = get_config(config_path)
self.__dict__.update(config)
def chat(self, message=None, display=True, stream=False):
if stream:
return self._streaming_chat(message=message, display=display)
# If stream=False, *pull* from the stream.
for _ in self._streaming_chat(message=message, display=display):
pass
return self.messages
def _streaming_chat(self, message=None, display=True):
# If we have a display,
# we can validate our LLM settings w/ the user first
gpt4free = True
if display and not gpt4free:
validate_llm_settings(self)
# Setup the LLM
if not self._llm:
self._llm = setup_llm(self)
# Sometimes a little more code -> a much better experience!
        # Display mode actually runs interpreter.chat(display=False, stream=True) from within the terminal_interface,
        # which wraps the vanilla .chat(display=False) generator in a display.
        # Quite different from the plain generator stuff, so redirect to that.
if display:
yield from terminal_interface(self, message)
return
# One-off message
if message or message == "":
if message == "":
message = "No entry from user - please suggest something to enter."
## We support multiple formats for the incoming message:
# Dict (these are passed directly in)
if isinstance(message, dict):
if "role" not in message:
message["role"] = "user"
self.messages.append(message)
# String (we construct a user message dict)
elif isinstance(message, str):
self.messages.append({"role": "user", "message": message})
# List (this is like the OpenAI API)
elif isinstance(message, list):
self.messages = message
yield from self._respond()
# Save conversation if we've turned conversation_history on
if self.conversation_history:
# If it's the first message, set the conversation name
if not self.conversation_filename:
first_few_words = "_".join(
self.messages[0]["message"][:25].split(" ")[:-1]
)
for char in '<>:"/\\|?*!': # Invalid characters for filenames
first_few_words = first_few_words.replace(char, "")
date = datetime.now().strftime("%B_%d_%Y_%H-%M-%S")
self.conversation_filename = (
"__".join([first_few_words, date]) + ".json"
)
                # Check if the directory exists; if not, create it
if not os.path.exists(self.conversation_history_path):
os.makedirs(self.conversation_history_path)
# Write or overwrite the file
with open(
os.path.join(
self.conversation_history_path, self.conversation_filename
),
"w",
) as f:
json.dump(self.messages, f)
return
raise Exception(
"`interpreter.chat()` requires a display. Set `display=True` or pass a message into `interpreter.chat(message)`."
)
def _respond(self):
| """
This file defines the Interpreter class.
It's the main file. `import interpreter` will import an instance of this class.
"""
class Interpreter:
def cli(self):
cli(self)
def __init__(self):
# State
self.messages = []
self._code_interpreters = {}
self.config_file = user_config_path
# Settings
self.local = False
self.auto_run = False
self.debug_mode = False
self.max_output = 2000
self.safe_mode = "off"
self.disable_procedures = False
# Conversation history
self.conversation_history = True
self.conversation_filename = None
self.conversation_history_path = get_storage_path("conversations")
# LLM settings
self.model = ""
self.temperature = None
self.system_message = ""
self.context_window = None
self.max_tokens = None
self.api_base = None
self.api_key = None
self.max_budget = None
self._llm = None
self.function_calling_llm = None
self.vision = False # LLM supports vision
# Load config defaults
self.extend_config(self.config_file)
# Check for update
try:
if not self.local:
# This should actually be pushed into the utility
if check_for_update():
display_markdown_message(
"> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
)
except:
# Doesn't matter
pass
def extend_config(self, config_path):
if self.debug_mode:
print(f"Extending configuration from `{config_path}`")
config = get_config(config_path)
self.__dict__.update(config)
def chat(self, message=None, display=True, stream=False):
if stream:
return self._streaming_chat(message=message, display=display)
# If stream=False, *pull* from the stream.
for _ in self._streaming_chat(message=message, display=display):
pass
return self.messages
def _streaming_chat(self, message=None, display=True):
# If we have a display,
# we can validate our LLM settings w/ the user first
gpt4free = True
if display and not gpt4free:
validate_llm_settings(self)
# Setup the LLM
if not self._llm:
self._llm = setup_llm(self)
# Sometimes a little more code -> a much better experience!
        # Display mode actually runs interpreter.chat(display=False, stream=True) from within the terminal_interface,
        # which wraps the vanilla .chat(display=False) generator in a display.
        # Quite different from the plain generator stuff, so redirect to that.
if display:
yield from terminal_interface(self, message)
return
# One-off message
if message or message == "":
if message == "":
message = "No entry from user - please suggest something to enter."
## We support multiple formats for the incoming message:
# Dict (these are passed directly in)
if isinstance(message, dict):
if "role" not in message:
message["role"] = "user"
self.messages.append(message)
# String (we construct a user message dict)
elif isinstance(message, str):
self.messages.append({"role": "user", "message": message})
# List (this is like the OpenAI API)
elif isinstance(message, list):
self.messages = message
yield from self._respond()
# Save conversation if we've turned conversation_history on
if self.conversation_history:
# If it's the first message, set the conversation name
if not self.conversation_filename:
first_few_words = "_".join(
self.messages[0]["message"][:25].split(" ")[:-1]
)
for char in '<>:"/\\|?*!': # Invalid characters for filenames
first_few_words = first_few_words.replace(char, "")
date = datetime.now().strftime("%B_%d_%Y_%H-%M-%S")
self.conversation_filename = (
"__".join([first_few_words, date]) + ".json"
)
                # Check if the directory exists; if not, create it
if not os.path.exists(self.conversation_history_path):
os.makedirs(self.conversation_history_path)
# Write or overwrite the file
with open(
os.path.join(
self.conversation_history_path, self.conversation_filename
),
"w",
) as f:
json.dump(self.messages, f)
return
raise Exception(
"`interpreter.chat()` requires a display. Set `display=True` or pass a message into `interpreter.chat(message)`."
)
def _respond(self): | yield from respond(self) | 9 | 2023-11-16 03:10:42+00:00 | 12k |
3dp-accelerometer/octoprint-accelerometer | octoprint_accelerometer/plugin.py | [
{
"identifier": "DataPostProcessRunner",
"path": "octoprint_accelerometer/data_post_process.py",
"snippet": "class DataPostProcessRunner:\n \"\"\"\n Runner for traversing stream files and post-processing (FFT) if necessary.\n \"\"\"\n def __init__(self,\n logger: Logger,\n on_event_callback: Optional[Callable[[DataProcessingEventType], None]],\n input_dir: str,\n input_file_prefix: str,\n algorithm_d1: str,\n output_dir: str,\n output_file_prefix: str,\n output_overwrite: bool,\n do_dry_run: bool,\n do_abort_flag: threading.Event = threading.Event()):\n self.logger: Logger = logger\n self.on_event_callback: Optional[Callable[[DataProcessingEventType], None]] = on_event_callback\n self._input_dir: str = input_dir\n self._input_file_prefix: str = input_file_prefix\n self._algorithm_d1: str = algorithm_d1\n self._output_dir: str = output_dir\n self._output_file_prefix: str = output_file_prefix\n self._output_overwrite: bool = output_overwrite\n self._do_dry_run: bool = do_dry_run\n self._do_abort_flag: threading.Event = do_abort_flag\n self._background_task: Optional[DataPostProcessBackgroundTask] = None\n self._background_task_start_timestamp: Optional[float] = None\n self._background_task_stop_timestamp: Optional[float] = None\n self._files_total: Optional[int] = None\n self._files_processed: Optional[int] = None\n self._files_skipped: Optional[int] = None\n\n @property\n def algorithm_d1(self) -> str:\n return self._algorithm_d1\n\n @algorithm_d1.setter\n def algorithm_d1(self, algorithm_d1: str):\n self._algorithm_d1 = algorithm_d1\n\n @property\n def input_dir(self) -> str:\n return self._input_dir\n\n @input_dir.setter\n def input_dir(self, input_dir: str):\n self._input_dir = input_dir\n\n @property\n def input_file_prefix(self) -> str:\n return self._input_file_prefix\n\n @input_file_prefix.setter\n def input_file_prefix(self, input_file_prefix: str):\n self._input_file_prefix = input_file_prefix\n\n @property\n def output_dir(self) -> str:\n return self._output_dir\n\n @output_dir.setter\n def output_dir(self, output_dir: str):\n self._output_dir = output_dir\n\n @property\n def output_file_prefix(self) -> str:\n return self._output_file_prefix\n\n @output_file_prefix.setter\n def output_file_prefix(self, output_file_prefix: str):\n self._output_file_prefix = output_file_prefix\n\n @property\n def output_overwrite(self) -> bool:\n return self._output_overwrite\n\n @output_overwrite.setter\n def output_overwrite(self, output_overwrite: bool):\n self._output_overwrite = output_overwrite\n\n @property\n def do_dry_run(self) -> bool:\n return self._do_dry_run\n\n @do_dry_run.setter\n def do_dry_run(self, do_dry_run: bool):\n self._do_dry_run = do_dry_run\n\n def is_running(self) -> bool:\n return True if self._background_task is not None and self._background_task.is_alive() else False\n\n def _send_on_event_callback(self, event: DataProcessingEventType):\n if self.on_event_callback:\n self.on_event_callback(event)\n\n def _send_on_thread_event_callback(self,\n event: DataProcessingEventType,\n total: Optional[int] = None,\n processed: Optional[int] = None,\n skipped: Optional[int] = None):\n\n self._files_total = total\n self._files_processed = processed\n self._files_skipped = skipped\n\n if event == DataProcessingEventType.PROCESSING_FINISHED:\n self._thread_stop_timestamp = time.time()\n\n if self.on_event_callback:\n self.on_event_callback(event)\n\n # TODO: force an early thread termination not by just terminating run().\n # Reason: Thread.is_alive() takes up to 30 seconds after run() terminated\n # to report not-alive. 
This works but sounds like a bug though.\n if event in [DataProcessingEventType.PROCESSING_FINISHED,\n DataProcessingEventType.UNHANDLED_EXCEPTION,\n DataProcessingEventType.ABORTED]:\n self.logger.info(\"data post processing thread terminated\")\n raise SystemExit()\n\n def stop(self) -> None:\n self._do_abort_flag.set()\n self._send_on_event_callback(DataProcessingEventType.ABORTING)\n if self._background_task:\n try:\n self._background_task.join()\n except RuntimeError as _e:\n self.logger.info(\"no running thread that can be stopped\")\n self._background_task = None\n self._background_task_stop_timestamp = time.time()\n self._send_on_event_callback(DataProcessingEventType.ABORTED)\n\n def get_last_run_duration_s(self) -> Optional[float]:\n \"\"\"\n Returns the last known duration.\n\n Note: Whenever this method is called, make sure to assert that the thread is not running.\n\n This is-running check is skipped here on purpose.\n Normally the child thread is the caller itself.\n The call propagated indirectly through the plugin's callback that most likely called this method again.\n In that case the thread is always running.\n\n :return: the last known duration; None if unknown of thread is still running\n \"\"\"\n return None if not self._thread_stop_timestamp or not self._background_task_start_timestamp else self._thread_stop_timestamp - self._background_task_start_timestamp\n\n def get_last_processed_count(self) -> Tuple[Optional[int], Optional[int], Optional[int]]:\n return self._files_total, self._files_processed, self._files_skipped\n\n def run(self) -> None:\n self._do_abort_flag.clear()\n self._background_task_stop_timestamp = None\n self._files_total = None\n self._files_processed = None\n self._files_skipped = None\n\n try:\n self.logger.info(\"start data processing ...\")\n self._background_task = DataPostProcessBackgroundTask(\n logger=self.logger,\n task=DataPostProcessTask(\n logger=self.logger,\n runner=DataDecomposeRunner(\n command=\"algo\",\n input_dir=self.input_dir,\n input_file_prefix=self.input_file_prefix,\n algorithm_d1=self.algorithm_d1,\n output_dir=self.output_dir,\n output_file_prefix=self.output_file_prefix,\n output_overwrite=False),\n on_event_callback=self._send_on_thread_event_callback))\n\n self._send_on_event_callback(DataProcessingEventType.PROCESSING)\n self._background_task_start_timestamp = time.time()\n self._background_task.start()\n\n except Exception as e:\n self.logger.error(\"railed to start data processing thread\")\n self.logger.error(str(e))\n self._send_on_event_callback(DataProcessingEventType.UNHANDLED_EXCEPTION)"
},
{
"identifier": "DataProcessingEventType",
"path": "octoprint_accelerometer/event_types.py",
"snippet": "class DataProcessingEventType(IntEnum):\n \"\"\"\n Types that can be emitted by callback from the data processing task.\n \"\"\"\n\n STARTING = 1\n \"data processing: sane execution event\"\n PROCESSING = 2\n \"data processing: sane execution event\"\n PROCESSING_FINISHED = 3\n \"data processing: sane execution event\"\n\n UNHANDLED_EXCEPTION = 12\n \"data processing: exceptional event\"\n\n ABORTING = 21\n \"event upon user request\"\n ABORTED = 22\n \"event upon user request\""
},
{
"identifier": "RecordingEventType",
"path": "octoprint_accelerometer/event_types.py",
"snippet": "class RecordingEventType(IntEnum):\n \"\"\"\n Types that can be emitted by callback from the recording task.\n \"\"\"\n\n STARTING = 1\n \"processing: sane execution event\"\n PROCESSING = 2\n \"processing: sane execution event\"\n PROCESSING_FINISHED = 3\n \"processing: sane execution event\"\n\n FIFO_OVERRUN = 11\n \"processing: exceptional event\"\n UNHANDLED_EXCEPTION = 12\n \"processing: exceptional event\"\n\n ABORTING = 21\n \"event upon user request\"\n ABORTED = 22\n \"event upon user request\""
},
{
"identifier": "RecordStepSeriesRunner",
"path": "octoprint_accelerometer/record_step_series.py",
"snippet": "class RecordStepSeriesRunner:\n \"\"\"\n Runner for moving printer, recording streams from accelerometer and saving to data to files.\n \"\"\"\n\n def __init__(self,\n logger: Logger,\n printer: PrinterInterface,\n controller_serial_device: str,\n on_event_callback: Optional[Callable[[RecordingEventType], None]],\n controller_record_timelapse_s: float,\n controller_decode_timeout_s: float,\n sensor_odr_hz: int,\n gcode_start_point_mm: Tuple[int, int, int],\n gcode_axis: List[Literal[\"x\", \"y\", \"z\"]],\n gcode_distance_mm: int,\n gcode_step_count: int,\n gcode_sequence_count: int,\n start_frequency_hz: int,\n stop_frequency_hz: int,\n step_frequency_hz: int,\n start_zeta_em2: int,\n stop_zeta_em2: int,\n step_zeta_em2: int,\n output_file_prefix: str,\n output_dir: str,\n do_dry_run: bool,\n do_abort_flag: threading.Event = threading.Event()):\n self.controller_response_error: bool = False\n self.controller_fifo_overrun_error: bool = False\n self.unhandled_exception: bool = False\n self.logger: Logger = logger\n self.printer: PrinterInterface = printer\n self._controller_serial_device: str = controller_serial_device\n self.on_event_callback: Optional[Callable[[RecordingEventType], None]] = on_event_callback\n self._controller_record_timelapse_s: float = controller_record_timelapse_s\n self._controller_decode_timeout_s: float = controller_decode_timeout_s\n self._sensor_odr_hz: int = sensor_odr_hz\n self._gcode_start_point_mm: Tuple[int, int, int] = gcode_start_point_mm\n self._gcode_axis: List[Literal[\"x\", \"y\", \"z\"]] = gcode_axis\n self._gcode_distance_mm: int = gcode_distance_mm\n self._gcode_step_count: int = gcode_step_count\n self._gcode_sequence_count: int = gcode_sequence_count\n self._start_frequency_hz: int = start_frequency_hz\n self._stop_frequency_hz: int = stop_frequency_hz\n self._step_frequency_hz: int = step_frequency_hz\n self._start_zeta_em2: int = start_zeta_em2\n self._stop_zeta_em2: int = stop_zeta_em2\n self._step_zeta_em2: int = step_zeta_em2\n self._output_file_prefix: str = output_file_prefix\n self._output_dir: str = output_dir\n self._do_dry_run: bool = do_dry_run\n self._do_abort_flag: threading.Event = do_abort_flag\n self._background_task: Optional[RecordStepSeriesBackgroundTask] = None\n self._background_task_start_timestamp: Optional[float] = None\n self._background_task_stop_timestamp: Optional[float] = None\n\n @property\n def controller_serial_device(self) -> str:\n return self._controller_serial_device\n\n @controller_serial_device.setter\n def controller_serial_device(self, controller_serial_device: str):\n self._controller_serial_device = controller_serial_device\n\n @property\n def controller_record_timelapse_s(self) -> float:\n return self._controller_record_timelapse_s\n\n @controller_record_timelapse_s.setter\n def controller_record_timelapse_s(self, controller_record_timelapse_s: float):\n self._controller_record_timelapse_s = controller_record_timelapse_s\n\n @property\n def controller_decode_timeout_s(self) -> float:\n return self._controller_decode_timeout_s\n\n @controller_decode_timeout_s.setter\n def controller_decode_timeout_s(self, controller_decode_timeout_s: float):\n self._controller_decode_timeout_s = controller_decode_timeout_s\n\n @property\n def sensor_odr_hz(self) -> int:\n return self._sensor_odr_hz\n\n @sensor_odr_hz.setter\n def sensor_odr_hz(self, sensor_odr_hz: int):\n self._sensor_odr_hz = sensor_odr_hz\n\n @property\n def gcode_start_point_mm(self) -> Tuple[int, int, int]:\n return 
self._gcode_start_point_mm\n\n @gcode_start_point_mm.setter\n def gcode_start_point_mm(self, gcode_start_point_mm: Tuple[int, int, int]):\n self._gcode_start_point_mm = gcode_start_point_mm\n\n @property\n def gcode_axis(self) -> List[Literal[\"x\", \"y\", \"z\"]]:\n return self._gcode_axis\n\n @gcode_axis.setter\n def gcode_axis(self, gcode_axis: List[Literal[\"x\", \"y\", \"z\"]]):\n self._gcode_axis = gcode_axis\n\n @property\n def gcode_distance_mm(self) -> int:\n return self._gcode_distance_mm\n\n @gcode_distance_mm.setter\n def gcode_distance_mm(self, gcode_distance_mm: int):\n self._gcode_distance_mm = gcode_distance_mm\n\n @property\n def gcode_step_count(self) -> int:\n return self._gcode_step_count\n\n @gcode_step_count.setter\n def gcode_step_count(self, gcode_step_count: int):\n self._gcode_step_count = gcode_step_count\n\n @property\n def gcode_sequence_count(self) -> int:\n return self._gcode_sequence_count\n\n @gcode_sequence_count.setter\n def gcode_sequence_count(self, gcode_sequence_count: int):\n self._gcode_sequence_count = gcode_sequence_count\n\n @property\n def start_frequency_hz(self) -> int:\n return self._start_frequency_hz\n\n @start_frequency_hz.setter\n def start_frequency_hz(self, start_frequency_hz: int):\n self._start_frequency_hz = start_frequency_hz\n\n @property\n def stop_frequency_hz(self) -> int:\n return self._stop_frequency_hz\n\n @stop_frequency_hz.setter\n def stop_frequency_hz(self, stop_frequency_hz: int):\n self._stop_frequency_hz = stop_frequency_hz\n\n @property\n def step_frequency_hz(self) -> int:\n return self._step_frequency_hz\n\n @step_frequency_hz.setter\n def step_frequency_hz(self, step_frequency_hz: int):\n self._step_frequency_hz = step_frequency_hz\n\n @property\n def start_zeta_em2(self) -> int:\n return self._start_zeta_em2\n\n @start_zeta_em2.setter\n def start_zeta_em2(self, start_zeta_em2: int):\n self._start_zeta_em2 = start_zeta_em2\n\n @property\n def stop_zeta_em2(self) -> int:\n return self._stop_zeta_em2\n\n @stop_zeta_em2.setter\n def stop_zeta_em2(self, stop_zeta_em2: int):\n self._stop_zeta_em2 = stop_zeta_em2\n\n @property\n def step_zeta_em2(self) -> int:\n return self._step_zeta_em2\n\n @step_zeta_em2.setter\n def step_zeta_em2(self, step_zeta_em2: int):\n self._step_zeta_em2 = step_zeta_em2\n\n @property\n def output_file_prefix(self) -> str:\n return self._output_file_prefix\n\n @output_file_prefix.setter\n def output_file_prefix(self, output_file_prefix: str):\n self._output_file_prefix = output_file_prefix\n\n @property\n def output_dir(self) -> str:\n return self._output_dir\n\n @output_dir.setter\n def output_dir(self, output_dir: str):\n self._output_dir = output_dir\n\n @property\n def do_dry_run(self) -> bool:\n return self._do_dry_run\n\n @do_dry_run.setter\n def do_dry_run(self, do_dry_run: bool):\n self._do_dry_run = do_dry_run\n\n def is_running(self) -> bool:\n return True if self._background_task is not None and self._background_task.is_alive() else False\n\n def task_execution_had_errors(self) -> bool:\n return self.controller_response_error or self.controller_response_error or self.unhandled_exception\n\n def _send_on_event_callback(self, event: RecordingEventType):\n if self.on_event_callback:\n self.on_event_callback(event)\n\n def _send_on_thread_event_callback(self, event: RecordingEventType):\n if event == RecordingEventType.PROCESSING_FINISHED:\n self._thread_stop_timestamp = time.time()\n\n if self.on_event_callback:\n self.on_event_callback(event)\n\n # TODO: force an early thread 
termination not by just terminating run().\n # Reason: Thread.is_alive() takes up to 30 seconds after run() terminated\n # to report not-alive. This works but sounds like a bug though.\n if event in [RecordingEventType.PROCESSING_FINISHED,\n RecordingEventType.FIFO_OVERRUN,\n RecordingEventType.UNHANDLED_EXCEPTION,\n RecordingEventType.ABORTED]:\n self.logger.info(\"recording thread terminated\")\n raise SystemExit()\n\n def stop(self) -> None:\n self._do_abort_flag.set()\n self._send_on_event_callback(RecordingEventType.ABORTING)\n if self._background_task:\n try:\n self._background_task.join()\n except RuntimeError as _e:\n self.logger.info(\"no running thread that can be stopped\")\n self._background_task = None\n self._background_task_stop_timestamp = time.time()\n self._send_on_event_callback(RecordingEventType.ABORTED)\n\n def get_last_run_duration_s(self) -> Optional[float]:\n \"\"\"\n Returns the last known duration.\n\n Note: Whenever this method is called, make sure to assert that the thread is not running.\n\n This is-running check is skipped here on purpose.\n Normally the child thread is the caller itself.\n The call propagated indirectly through the plugin's callback that most likely called this method again.\n In that case the thread is always running.\n\n :return: the last known duration; None if unknown of thread is still running\n \"\"\"\n return None if not self._thread_stop_timestamp or not self._background_task_start_timestamp else self._thread_stop_timestamp - self._background_task_start_timestamp\n\n def run(self) -> None:\n py3dpaxxel_octo = Py3dpAxxelOcto(self.printer, self.logger)\n self.controller_fifo_overrun_error = False\n self.controller_response_error = False\n self.unhandled_exception = False\n self._do_abort_flag.clear()\n self._background_task_stop_timestamp = None\n\n if not self.printer.is_operational():\n self.logger.warning(\"received request to start recording but printer is not operational\")\n return\n\n try:\n self.logger.info(\"start recording ...\")\n self._background_task = RecordStepSeriesBackgroundTask(\n logger=self.logger,\n task=RecordStepSeriesTask(\n logger=self.logger,\n runner=SamplingStepsSeriesRunner(\n octoprint_api=py3dpaxxel_octo,\n controller_serial_device=self.controller_serial_device,\n controller_record_timelapse_s=self.controller_record_timelapse_s,\n controller_decode_timeout_s=self.controller_decode_timeout_s,\n sensor_odr=OutputDataRateFromHz[self.sensor_odr_hz],\n gcode_start_point_mm=self.gcode_start_point_mm,\n gcode_axis=self.gcode_axis,\n gcode_distance_mm=self.gcode_distance_mm,\n gcode_step_repeat_count=self.gcode_step_count,\n gcode_sequence_repeat_count=self.gcode_sequence_count,\n fx_start_hz=self.start_frequency_hz,\n fx_stop_hz=self.stop_frequency_hz,\n fx_step_hz=self.step_frequency_hz,\n zeta_start_em2=self.start_zeta_em2,\n zeta_stop_em2=self.start_zeta_em2,\n zeta_step_em2=self.step_zeta_em2,\n output_file_prefix=self.output_file_prefix,\n output_dir=self.output_dir,\n do_dry_run=self.do_dry_run,\n do_abort_flag=self._do_abort_flag),\n on_event_callback=self._send_on_thread_event_callback))\n self._send_on_event_callback(RecordingEventType.PROCESSING)\n self._background_task_start_timestamp = time.time()\n self._background_task.start()\n\n except Exception as e:\n self.unhandled_exception = True\n self.logger.error(\"railed to start recording thread\")\n self.logger.error(str(e))\n self._send_on_event_callback(RecordingEventType.UNHANDLED_EXCEPTION)"
},
{
"identifier": "RunMeta",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class RunMeta:\n started: Optional[Timestamp] = None # Timestamp()\n stopped: Optional[Timestamp] = None # Timestamp()\n sequences: Dict[int, SequenceMeta] = field(default_factory=lambda: ({}))"
},
{
"identifier": "SequenceMeta",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class SequenceMeta:\n streams: Dict[str, StreamMeta] = field(default_factory=lambda: ({}))"
},
{
"identifier": "StreamMeta",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class StreamMeta:\n file: Optional[File] = None # = File()\n meta: Optional[FilenameMetaStream] = None # = FilenameMetaStream()\n ffts: Dict[str, FftMeta] = field(default_factory=lambda: ({}))"
},
{
"identifier": "DataSets",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class DataSets:\n runs: Dict[str, RunMeta] = field(default_factory=lambda: ({}))"
},
{
"identifier": "FftMeta",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class FftMeta:\n file: Optional[File] = None # = File()\n meta: Optional[FilenameMetaFft] = None # = FilenameMetaStream()"
},
{
"identifier": "Timestamp",
"path": "octoprint_accelerometer/transfer_types.py",
"snippet": "class Timestamp:\n year: int = 0\n month: int = 0\n day: int = 0\n hour: int = 0\n minute: int = 0\n second: int = 0\n milli_second: int = 0"
}
] | import os
import flask
import octoprint.plugin
from typing import Any, Dict, List, Literal, Optional, Tuple
from octoprint.server.util.tornado import LargeResponseHandler, path_validation_factory
from octoprint.util import is_hidden_path
from py3dpaxxel.cli.args import convert_axis_from_str
from py3dpaxxel.controller.api import Py3dpAxxel
from py3dpaxxel.sampling_tasks.series_argument_generator import RunArgsGenerator
from py3dpaxxel.storage.file_filter import FileSelector, File
from py3dpaxxel.storage.filename import timestamp_from_args
from py3dpaxxel.storage.filename_meta import FilenameMetaStream, FilenameMetaFft
from octoprint_accelerometer.data_post_process import DataPostProcessRunner
from octoprint_accelerometer.event_types import DataProcessingEventType, RecordingEventType
from octoprint_accelerometer.record_step_series import RecordStepSeriesRunner
from octoprint_accelerometer.transfer_types import RunMeta, SequenceMeta, StreamMeta, DataSets, FftMeta, Timestamp | 9,090 | self.speed_y_mm_s = self._settings.get_int(["speed_y_mm_s"])
self.speed_z_mm_s = self._settings.get_int(["speed_z_mm_s"])
self.acceleration_x_mm_ss = self._settings.get_int(["acceleration_x_mm_ss"])
self.acceleration_y_mm_ss = self._settings.get_int(["acceleration_y_mm_ss"])
self.acceleration_z_mm_ss = self._settings.get_int(["acceleration_z_mm_ss"])
self.anchor_point_coord_x_mm = self._settings.get_int(["anchor_point_coord_x_mm"])
self.anchor_point_coord_y_mm = self._settings.get_int(["anchor_point_coord_y_mm"])
self.anchor_point_coord_z_mm = self._settings.get_int(["anchor_point_coord_z_mm"])
self.sequence_count = self._settings.get_int(["sequence_count"])
self.go_start = self._settings.get_boolean(["go_start"])
self.return_start = self._settings.get_boolean(["return_start"])
self.auto_home = self._settings.get_boolean(["auto_home"])
self.start_frequency_hz = self._settings.get_int(["start_frequency_hz"])
self.stop_frequency_hz = self._settings.get_int(["stop_frequency_hz"])
self.step_frequency_hz = self._settings.get_int(["step_frequency_hz"])
self.start_zeta_em2 = self._settings.get_int(["start_zeta_em2"])
self.stop_zeta_em2 = self._settings.get_int(["stop_zeta_em2"])
self.step_zeta_em2 = self._settings.get_int(["step_zeta_em2"])
self.sensor_output_data_rate_hz = self._settings.get_int(["sensor_output_data_rate_hz"])
self.data_remove_before_run = self._settings.get_boolean(["data_remove_before_run"])
self.do_sample_x = self._settings.get_boolean(["do_sample_x"])
self.do_sample_y = self._settings.get_boolean(["do_sample_y"])
self.do_sample_z = self._settings.get_boolean(["do_sample_z"])
self.recording_timespan_s = self._settings.get_float(["recording_timespan_s"])
self.sequence_separation_s = self._settings.get_float(["sequence_separation_s"])
self.step_separation_s = self._settings.get_float(["step_separation_s"])
self.do_dry_run = self._settings.get_boolean(["do_dry_run"])
self._compute_start_points()
def _compute_start_points(self) -> None:
self.axis_x_sampling_start = Point3D(self.anchor_point_coord_x_mm - int(self.distance_x_mm // 2),
self.anchor_point_coord_y_mm,
self.anchor_point_coord_z_mm)
self.axis_y_sampling_start = Point3D(self.anchor_point_coord_x_mm,
self.anchor_point_coord_y_mm - int(self.distance_y_mm // 2),
self.anchor_point_coord_z_mm)
self.axis_z_sampling_start = Point3D(self.anchor_point_coord_x_mm,
self.anchor_point_coord_y_mm,
self.anchor_point_coord_z_mm + int(self.distance_z_mm // 2))
def _estimate_duration(self) -> float:
axs: List[Literal["x", "y", "z"]] = [ax for ax, enabled in [("x", self.do_sample_x), ("y", self.do_sample_y), ("z", self.do_sample_z)] if enabled]
sequences_count = len(RunArgsGenerator(
sequence_repeat_count=self.sequence_count,
fx_start_hz=self.start_frequency_hz,
fx_stop_hz=self.stop_frequency_hz,
fx_step_hz=self.step_frequency_hz,
zeta_start_em2=self.start_zeta_em2,
zeta_stop_em2=self.stop_zeta_em2,
zeta_step_em2=self.step_zeta_em2,
axis=axs,
out_file_prefix_1="", out_file_prefix_2="").generate())
duration_s = (sequences_count * self.recording_timespan_s +
(sequences_count - 1) * self.sequence_separation_s +
(self.step_count - 1) * sequences_count * self.step_separation_s)
return duration_s
def _get_parameter_dict(self, args: Dict[str, str] = None) -> Dict[str, str]:
key_name: str = "v"
requested_values: List[str] = []
if args and key_name in args.keys() and args[key_name] is not None:
requested_values.extend(args[key_name].split(","))
# reply with all parameters if no names were explicitly specified
requested_values = self._get_ui_exposed_parameters() if len(requested_values) == 0 else requested_values
params_dict: Dict[str, str] = dict()
exposed_parameters = self._get_ui_exposed_parameters()
for parameter_name in [pn for pn in requested_values if pn in exposed_parameters]:
params_dict[parameter_name] = getattr(self, parameter_name)
self._logger.debug(f"xxx supply with requested parameters: {params_dict}")
return params_dict
def _get_selected_axis_str(self) -> List[Literal["x", "y", "z"]]:
return convert_axis_from_str(
f"{'x' if self.do_sample_x else ''}{'y' if self.do_sample_y else ''}{'z' if self.do_sample_z else ''}"
)
def _construct_new_data_processing_runner(self) -> DataPostProcessRunner:
return DataPostProcessRunner(
logger=self._logger,
on_event_callback=self.on_data_processing_callback,
input_dir=self.get_plugin_data_folder(),
input_file_prefix=self.OUTPUT_STREAM_FILE_NAME_PREFIX,
algorithm_d1="discrete_blackman",
output_dir=self.get_plugin_data_folder(),
output_file_prefix=self.OUTPUT_FFT_FILE_NAME_PREFIX,
output_overwrite=False,
do_dry_run=False)
def _construct_new_step_series_runner(self) -> RecordStepSeriesRunner:
return RecordStepSeriesRunner(
logger=self._logger,
printer=self._printer,
controller_serial_device=self.device,
on_event_callback=self.on_recording_callback,
controller_record_timelapse_s=self.recording_timespan_s,
controller_decode_timeout_s=3.0,
sensor_odr_hz=self.sensor_output_data_rate_hz,
gcode_start_point_mm=(self.anchor_point_coord_x_mm, self.anchor_point_coord_y_mm, self.anchor_point_coord_z_mm),
gcode_axis=self._get_selected_axis_str(),
gcode_distance_mm=self.distance_x_mm,
gcode_step_count=self.step_count,
gcode_sequence_count=self.sequence_count,
start_frequency_hz=self.start_frequency_hz,
stop_frequency_hz=self.stop_frequency_hz,
step_frequency_hz=self.step_frequency_hz,
start_zeta_em2=self.start_zeta_em2,
stop_zeta_em2=self.stop_zeta_em2,
step_zeta_em2=self.step_zeta_em2,
output_file_prefix=self.OUTPUT_STREAM_FILE_NAME_PREFIX,
output_dir=self.get_plugin_data_folder(),
do_dry_run=self.do_dry_run)
def _push_data_to_ui(self, data: Dict[str, str]):
self._plugin_manager.send_plugin_message(self._identifier, data)
|
class Point3D:
def __init__(self, x: int, y: int, z: int):
self.x: int = x
self.y: int = y
self.z: int = z
def __str__(self):
return f"x={self.x} y={self.y} z={self.z}"
class OctoprintAccelerometerPlugin(octoprint.plugin.StartupPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.BlueprintPlugin):
OUTPUT_STREAM_FILE_NAME_PREFIX: str = "axxel"
OUTPUT_FFT_FILE_NAME_PREFIX: str = "fft"
# noinspection PyMissingConstructor
def __init__(self):
# following parameters are shared between settings and UI
self.distance_x_mm: int = 0
self.distance_y_mm: int = 0
self.distance_z_mm: int = 0
self.step_count: int = 0
self.speed_x_mm_s: int = 0
self.speed_y_mm_s: int = 0
self.speed_z_mm_s: int = 0
self.acceleration_x_mm_ss: int = 0
self.acceleration_y_mm_ss: int = 0
self.acceleration_z_mm_ss: int = 0
self.anchor_point_coord_x_mm: int = 0
self.anchor_point_coord_y_mm: int = 0
self.anchor_point_coord_z_mm: int = 0
self.sequence_count: int = 0
self.go_start: bool = False
self.return_start: bool = False
self.auto_home: bool = False
self.start_frequency_hz: int = 0
self.stop_frequency_hz: int = 0
self.step_frequency_hz: int = 0
self.start_zeta_em2: int = 0
self.stop_zeta_em2: int = 0
self.step_zeta_em2: int = 0
self.sensor_output_data_rate_hz: int = 0
self.data_remove_before_run: bool = False
self.do_sample_x: bool = False
self.do_sample_y: bool = False
self.do_sample_z: bool = False
self.recording_timespan_s: float = 0
self.sequence_separation_s: float = 0
self.step_separation_s: float = 0
self.do_dry_run: bool = False
# other parameters shared with UI
self.devices_seen: List[str] = []
self.device: str = ""
self.controller_fifo_overrun_error: bool = False
self.controller_response_error: bool = False
# following parameters are computed from above parameters
self.axis_x_sampling_start: Point3D = Point3D(0, 0, 0)
self.axis_y_sampling_start: Point3D = Point3D(0, 0, 0)
self.axis_z_sampling_start: Point3D = Point3D(0, 0, 0)
# recording runner: once constructed, all properties shall be updated before invocation
self.data_recording_runner: Optional[RecordStepSeriesRunner] = None
self.data_processing_runner: Optional[DataPostProcessRunner] = None
@staticmethod
def _get_devices() -> Tuple[str, List[str]]:
"""
:return: tuple of primary device (if any) and list of all devices
"""
seen_devices: List[str] = [k for k in Py3dpAxxel.get_devices_dict().keys()]
primary: str = seen_devices[0] if len(seen_devices) > 0 else None
return primary, seen_devices
def _update_seen_devices(self):
primary, seen_devices = self._get_devices()
self._logger.debug(f"seen devices: primary={primary}, seen={seen_devices}")
self.devices_seen = seen_devices
self.device = primary if primary is not None else ""
@octoprint.plugin.BlueprintPlugin.route("/set_values", methods=["POST"])
def on_api_set_values(self):
data = flask.request.json
self._update_members_from_api(data)
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_recording", methods=["POST"])
def on_api_start_recording(self):
self._start_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/abort_recording", methods=["POST"])
def on_api_abort_recording(self):
self._abort_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_data_processing", methods=["POST"])
def on_api_start_data_processing(self):
self._start_data_processing()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/get_estimate", methods=["GET"])
def on_api_get_estimate(self):
return flask.jsonify({f"estimate": self._estimate_duration()})
@octoprint.plugin.BlueprintPlugin.route("/get_parameters", methods=["GET"])
def on_api_get_parameters(self):
return flask.jsonify({f"parameters": self._get_parameter_dict(flask.request.args)})
@octoprint.plugin.BlueprintPlugin.route("/get_files_listing", methods=["GET"])
def on_api_get_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), ".*"))
files_details = fs.filter()
return flask.jsonify({f"files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_stream_files_listing", methods=["GET"])
def on_api_get_stream_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [StreamMeta(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"stream_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_fft_files_listing", methods=["GET"])
def on_api_get_fft_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [FftMeta(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"fft_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_data_listing", methods=["GET"])
def on_api_get_data_listing(self):
fs_stream = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
fs_fft = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files_meta_data_stream: List[Tuple[File, FilenameMetaStream]] = [(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in fs_stream.filter()]
files_meta_data_fft: List[Tuple[File, FilenameMetaFft]] = [(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in fs_fft.filter()]
data_sets: DataSets = DataSets()
# append all streams
for file_meta, filename_meta in files_meta_data_stream:
run_hash, sequence_nr, stream_hash = filename_meta.run_hash, filename_meta.sequence_nr, filename_meta.stream_hash
if run_hash not in data_sets.runs.keys():
data_sets.runs[run_hash] = RunMeta()
if sequence_nr not in data_sets.runs[run_hash].sequences.keys():
data_sets.runs[run_hash].sequences[sequence_nr] = SequenceMeta()
if stream_hash not in data_sets.runs[run_hash].sequences[sequence_nr].streams.keys():
data_sets.runs[run_hash].sequences[sequence_nr].streams[stream_hash] = StreamMeta(file_meta, filename_meta)
# append all FFTs to their respective stream
for file_meta, filename_meta in files_meta_data_fft:
run_hash, sequence_nr, stream_hash = filename_meta.run_hash, filename_meta.sequence_nr, filename_meta.stream_hash
if run_hash not in data_sets.runs.keys():
self._logger.warning(f"failed to assign orphaned FFT file={file_meta.filename_ext} to run, run_hash={run_hash} unknown")
continue
if sequence_nr not in data_sets.runs[run_hash].sequences.keys():
self._logger.warning(f"failed to assign orphaned FFT file={file_meta.filename_ext} to sequence, sequence_nr={sequence_nr} unknown")
continue
if stream_hash not in data_sets.runs[run_hash].sequences[sequence_nr].streams.keys():
self._logger.warning(f"failed to assign orphaned FFT file={file_meta.filename_ext} to stream, stream_hash={stream_hash} unknown")
continue
fft_key: str = filename_meta.fft_axis
if fft_key not in data_sets.runs[run_hash].sequences[sequence_nr].streams[stream_hash].ffts.keys():
data_sets.runs[run_hash].sequences[sequence_nr].streams[stream_hash].ffts[fft_key] = FftMeta(file_meta, filename_meta)
# store first and last timestamp of run
for run in data_sets.runs.values():
youngest_ts: str = "00000000-000000000"
oldest_ts: str = "99999999-235959999"
for sequence in run.sequences.values():
for stream in sequence.streams.values():
meta: FilenameMetaStream = stream.meta
ts = timestamp_from_args(meta.year, meta.month, meta.day, meta.hour, meta.minute, meta.second, meta.milli_second)
if ts < oldest_ts:
oldest_ts = ts
run.started = Timestamp(meta.year, meta.month, meta.day, meta.hour, meta.minute, meta.second, meta.milli_second)
if ts > youngest_ts:
youngest_ts = ts
run.stopped = Timestamp(meta.year, meta.month, meta.day, meta.hour, meta.minute, meta.second, meta.milli_second)
return flask.jsonify({f"data_sets": data_sets})
def route_hook(self, _server_routes, *_args, **_kwargs):
return [
(r"/download/(.*)",
LargeResponseHandler,
dict(path=self.get_plugin_data_folder(),
mime_type_guesser=lambda *args, **kwargs: "text/plain",
stream_body=True,
as_attachment=False,
path_validation=path_validation_factory(
lambda path: not is_hidden_path(path), status_code=404)
)
)
]
def get_template_vars(self):
return dict(estimated_duration_s=self._estimate_duration())
def get_template_configs(self):
return [dict(type="settings", custom_bindings=True),
dict(type="tab", custom_bindings=True)]
def get_settings_defaults(self):
profile: Dict[str, Any] = self._printer_profile_manager.get_current_or_default()
width = profile["volume"]["width"]
height = profile["volume"]["height"]
depth = profile["volume"]["depth"]
origin_center: bool = True if profile["volume"]["origin"] == "center" else False
anchor_point = Point3D(0, 0, 50) if origin_center else Point3D(int(width // 2), int(depth // 2), int(height // 2))
return dict(
distance_x_mm=10,
distance_y_mm=10,
distance_z_mm=10,
step_count=2,
speed_x_mm_s=100,
speed_y_mm_s=100,
speed_z_mm_s=100,
acceleration_x_mm_ss=1000,
acceleration_y_mm_ss=1000,
acceleration_z_mm_ss=1000,
anchor_point_coord_x_mm=anchor_point.x,
anchor_point_coord_y_mm=anchor_point.y,
anchor_point_coord_z_mm=anchor_point.z,
sequence_count=1,
go_start=True,
return_start=True,
auto_home=True,
start_frequency_hz=10,
stop_frequency_hz=60,
step_frequency_hz=10,
start_zeta_em2=15,
stop_zeta_em2=15,
step_zeta_em2=5,
sensor_output_data_rate_hz=800,
data_remove_before_run=True,
do_sample_x=True,
do_sample_y=False,
do_sample_z=False,
recording_timespan_s=1.5,
sequence_separation_s=0.1,
step_separation_s=0.1,
do_dry_run=False,
)
def on_settings_save(self, data):
octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
self._update_members_from_settings()
def on_after_startup(self):
self._update_members_from_settings()
self._update_seen_devices()
self.data_recording_runner = self._construct_new_step_series_runner()
self.data_processing_runner = self._construct_new_data_processing_runner()
self._start_data_processing()
def get_assets(self):
return {"js": ["js/octoprint_accelerometer.js",
"js/d3.js",
"js/datavis.js"]}
def get_update_information(self):
# see https://docs.octoprint.org/en/master/bundledplugins/softwareupdate.html
return {
"octoprint_accelerometer": {
"displayName": "Octoprint Accelerometer",
"displayVersion": self._plugin_version,
# version check: GitHub repository
"type": "github_release",
"user": "rubienr",
"repo": "https://github.com/3dp-accelerometer/octoprint-accelerometer",
"current": self._plugin_version,
# update method: pip
"pip": "https://github.com/3dp-accelerometer/octoprint-accelerometer/archive/{target_version}.zip",
}
}
@staticmethod
def _get_ui_exposed_parameters() -> List[str]:
return ["distance_x_mm", "distance_y_mm", "distance_z_mm",
"step_count",
"speed_x_mm_s", "speed_y_mm_s", "speed_z_mm_s",
"acceleration_x_mm_ss", "acceleration_y_mm_ss", "acceleration_z_mm_ss",
"anchor_point_coord_x_mm", "anchor_point_coord_y_mm", "anchor_point_coord_z_mm",
"sequence_count",
"go_start", "return_start", "auto_home",
"start_frequency_hz", "stop_frequency_hz", "step_frequency_hz",
"start_zeta_em2", "stop_zeta_em2", "step_zeta_em2",
"sensor_output_data_rate_hz",
"data_remove_before_run",
"do_sample_x", "do_sample_y", "do_sample_z",
"recording_timespan_s", "sequence_separation_s", "step_separation_s",
"devices_seen", "device", "do_dry_run"]
def _update_member_from_str_value(self, parameter: str, value: str):
if parameter in self._get_ui_exposed_parameters():
old_value = getattr(self, parameter)
value_type = type(old_value)
setattr(self, parameter, value_type(value))
new_value = getattr(self, parameter)
self._logger.debug(f"xxx update {parameter}: {old_value} -> {new_value} from api")
def _update_members_from_api(self, data: Dict[str, str]):
for k, v in data.items():
if hasattr(self, k):
self._update_member_from_str_value(k, v)
self._compute_start_points()
def _update_members_from_settings(self) -> None:
self._logger.debug("xxx update from settings ...")
self.distance_x_mm = self._settings.get_int(["distance_x_mm"])
self.distance_y_mm = self._settings.get_int(["distance_y_mm"])
self.distance_z_mm = self._settings.get_int(["distance_z_mm"])
self.step_count = self._settings.get_int(["step_count"])
self.speed_x_mm_s = self._settings.get_int(["speed_x_mm_s"])
self.speed_y_mm_s = self._settings.get_int(["speed_y_mm_s"])
self.speed_z_mm_s = self._settings.get_int(["speed_z_mm_s"])
self.acceleration_x_mm_ss = self._settings.get_int(["acceleration_x_mm_ss"])
self.acceleration_y_mm_ss = self._settings.get_int(["acceleration_y_mm_ss"])
self.acceleration_z_mm_ss = self._settings.get_int(["acceleration_z_mm_ss"])
self.anchor_point_coord_x_mm = self._settings.get_int(["anchor_point_coord_x_mm"])
self.anchor_point_coord_y_mm = self._settings.get_int(["anchor_point_coord_y_mm"])
self.anchor_point_coord_z_mm = self._settings.get_int(["anchor_point_coord_z_mm"])
self.sequence_count = self._settings.get_int(["sequence_count"])
self.go_start = self._settings.get_boolean(["go_start"])
self.return_start = self._settings.get_boolean(["return_start"])
self.auto_home = self._settings.get_boolean(["auto_home"])
self.start_frequency_hz = self._settings.get_int(["start_frequency_hz"])
self.stop_frequency_hz = self._settings.get_int(["stop_frequency_hz"])
self.step_frequency_hz = self._settings.get_int(["step_frequency_hz"])
self.start_zeta_em2 = self._settings.get_int(["start_zeta_em2"])
self.stop_zeta_em2 = self._settings.get_int(["stop_zeta_em2"])
self.step_zeta_em2 = self._settings.get_int(["step_zeta_em2"])
self.sensor_output_data_rate_hz = self._settings.get_int(["sensor_output_data_rate_hz"])
self.data_remove_before_run = self._settings.get_boolean(["data_remove_before_run"])
self.do_sample_x = self._settings.get_boolean(["do_sample_x"])
self.do_sample_y = self._settings.get_boolean(["do_sample_y"])
self.do_sample_z = self._settings.get_boolean(["do_sample_z"])
self.recording_timespan_s = self._settings.get_float(["recording_timespan_s"])
self.sequence_separation_s = self._settings.get_float(["sequence_separation_s"])
self.step_separation_s = self._settings.get_float(["step_separation_s"])
self.do_dry_run = self._settings.get_boolean(["do_dry_run"])
self._compute_start_points()
def _compute_start_points(self) -> None:
self.axis_x_sampling_start = Point3D(self.anchor_point_coord_x_mm - int(self.distance_x_mm // 2),
self.anchor_point_coord_y_mm,
self.anchor_point_coord_z_mm)
self.axis_y_sampling_start = Point3D(self.anchor_point_coord_x_mm,
self.anchor_point_coord_y_mm - int(self.distance_y_mm // 2),
self.anchor_point_coord_z_mm)
self.axis_z_sampling_start = Point3D(self.anchor_point_coord_x_mm,
self.anchor_point_coord_y_mm,
self.anchor_point_coord_z_mm + int(self.distance_z_mm // 2))
def _estimate_duration(self) -> float:
axs: List[Literal["x", "y", "z"]] = [ax for ax, enabled in [("x", self.do_sample_x), ("y", self.do_sample_y), ("z", self.do_sample_z)] if enabled]
sequences_count = len(RunArgsGenerator(
sequence_repeat_count=self.sequence_count,
fx_start_hz=self.start_frequency_hz,
fx_stop_hz=self.stop_frequency_hz,
fx_step_hz=self.step_frequency_hz,
zeta_start_em2=self.start_zeta_em2,
zeta_stop_em2=self.stop_zeta_em2,
zeta_step_em2=self.step_zeta_em2,
axis=axs,
out_file_prefix_1="", out_file_prefix_2="").generate())
duration_s = (sequences_count * self.recording_timespan_s +
(sequences_count - 1) * self.sequence_separation_s +
(self.step_count - 1) * sequences_count * self.step_separation_s)
return duration_s
def _get_parameter_dict(self, args: Dict[str, str] = None) -> Dict[str, str]:
key_name: str = "v"
requested_values: List[str] = []
if args and key_name in args.keys() and args[key_name] is not None:
requested_values.extend(args[key_name].split(","))
# reply with all parameters if no names were explicitly specified
requested_values = self._get_ui_exposed_parameters() if len(requested_values) == 0 else requested_values
params_dict: Dict[str, str] = dict()
exposed_parameters = self._get_ui_exposed_parameters()
for parameter_name in [pn for pn in requested_values if pn in exposed_parameters]:
params_dict[parameter_name] = getattr(self, parameter_name)
self._logger.debug(f"xxx supply with requested parameters: {params_dict}")
return params_dict
def _get_selected_axis_str(self) -> List[Literal["x", "y", "z"]]:
return convert_axis_from_str(
f"{'x' if self.do_sample_x else ''}{'y' if self.do_sample_y else ''}{'z' if self.do_sample_z else ''}"
)
def _construct_new_data_processing_runner(self) -> DataPostProcessRunner:
return DataPostProcessRunner(
logger=self._logger,
on_event_callback=self.on_data_processing_callback,
input_dir=self.get_plugin_data_folder(),
input_file_prefix=self.OUTPUT_STREAM_FILE_NAME_PREFIX,
algorithm_d1="discrete_blackman",
output_dir=self.get_plugin_data_folder(),
output_file_prefix=self.OUTPUT_FFT_FILE_NAME_PREFIX,
output_overwrite=False,
do_dry_run=False)
def _construct_new_step_series_runner(self) -> RecordStepSeriesRunner:
return RecordStepSeriesRunner(
logger=self._logger,
printer=self._printer,
controller_serial_device=self.device,
on_event_callback=self.on_recording_callback,
controller_record_timelapse_s=self.recording_timespan_s,
controller_decode_timeout_s=3.0,
sensor_odr_hz=self.sensor_output_data_rate_hz,
gcode_start_point_mm=(self.anchor_point_coord_x_mm, self.anchor_point_coord_y_mm, self.anchor_point_coord_z_mm),
gcode_axis=self._get_selected_axis_str(),
gcode_distance_mm=self.distance_x_mm,
gcode_step_count=self.step_count,
gcode_sequence_count=self.sequence_count,
start_frequency_hz=self.start_frequency_hz,
stop_frequency_hz=self.stop_frequency_hz,
step_frequency_hz=self.step_frequency_hz,
start_zeta_em2=self.start_zeta_em2,
stop_zeta_em2=self.stop_zeta_em2,
step_zeta_em2=self.step_zeta_em2,
output_file_prefix=self.OUTPUT_STREAM_FILE_NAME_PREFIX,
output_dir=self.get_plugin_data_folder(),
do_dry_run=self.do_dry_run)
def _push_data_to_ui(self, data: Dict[str, str]):
self._plugin_manager.send_plugin_message(self._identifier, data)
| def _push_recording_event_to_ui(self, event: RecordingEventType): | 2 | 2023-11-14 17:15:15+00:00 | 12k |
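For orientation, the `_estimate_duration()` method in the plugin code of the record above combines the generated sequence count with the configured recording and separation times. The following is a minimal standalone sketch of that arithmetic only, with hypothetical input values; in the actual plugin the sequence count comes from `len(RunArgsGenerator(...).generate())` and the remaining values come from the plugin settings.

def estimate_duration_s(sequences_count: int,
                        step_count: int,
                        recording_timespan_s: float,
                        sequence_separation_s: float,
                        step_separation_s: float) -> float:
    # total recording time, plus the pauses between consecutive sequences,
    # plus the pauses between steps within every sequence
    return (sequences_count * recording_timespan_s
            + (sequences_count - 1) * sequence_separation_s
            + (step_count - 1) * sequences_count * step_separation_s)

# Hypothetical values: 6 sequences, 2 steps, 1.5 s recordings, 0.1 s separations
# -> 6*1.5 + 5*0.1 + 1*6*0.1 = 10.1 seconds
print(estimate_duration_s(6, 2, 1.5, 0.1, 0.1))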
hmmbug/pythaidate | tests/test_pakdate.py | [
{
"identifier": "julianday",
"path": "pythaidate/julianday.py",
"snippet": "def to_julianday(year, month, day):\ndef from_julianday(jd):\ndef today(): # pragma: no cover\ndef date_to_julianday(d):\ndef julianday_to_date(obj):\n B = 0\n A = math.trunc(yearp / 100.)\n B = 2 - A + math.trunc(A / 4.)\n C = math.trunc((365.25 * yearp) - 0.75) if yearp < 0 else math.trunc(365.25 * yearp)\n D = math.trunc(30.6001 * (monthp + 1))\n F, I = math.modf(jd)\n I = int(I)\n A = math.trunc((I - 1867216.25)/36524.25)\n B = (I + 1 + A - math.trunc(A / 4.)) if I > 2299160 else I\n C = B + 1524\n D = math.trunc((C - 122.1) / 365.25)\n E = math.trunc(365.25 * D)\n G = math.trunc((C - E) / 30.6001)"
},
{
"identifier": "CsDate",
"path": "pythaidate/csdate.py",
"snippet": "class CsDate:\n\n def __init__(self, year: int, month: int=None, day: int=None,\n month_style: int = MONTH_SUK):\n logging.debug(\"args year:%s month:%s day:%s, month_style:%s\",\n year, month, day, month_style)\n self.__year = year\n self.__month = month\n self.__day = day # day of month\n self.__days = None # days elapsed in year\n self.__month_style = month_style # Sukothai, Chiang Mai, Keng Tung\n self.__init_ymd()\n self.__calculate()\n logging.debug(\"final y:%s m:%s d:%s days:%s\",\n self.__year, self.__month, self.__day, self.__days)\n\n def __init_ymd(self):\n \"\"\"\n Initialise from year, month and day args.\n \"\"\"\n self.__year0 = self.calculate_year0(self.__year)\n # logging.debug(\"offset_days:%d\", self.__year0.offset_days)\n\n date_offset = None\n if self.__month == 5:\n date_offset = self.__day\n elif self.__month == 6:\n date_offset = 29 + self.__day\n\n MP = MONTH_POSITION_C if self.__year0.cal_type == \"C\" else MONTH_POSITION_AB\n tmonth = MP.index(self.__month)\n if date_offset and date_offset < self.__year0.offset_days:\n # this is a month 5 or 6 date at end of the year\n tmonth += 13 if self.__year0.cal_type == \"C\" else 12\n # shift month number to end of the index in LUNAR_MONTHS[]\n self.__month += 10\n self.__days = MONTH_CUMULATIVE_DAYS[self.__year0.cal_type][tmonth-1] + self.__day - self.__year0.offset_days\n logging.debug(\"ymd: y:%s m:%s d:%s days:%s cal_type:%s tmonth:%s\",\n self.__year, self.__month, self.__day,\n self.__days, self.__year0.cal_type, tmonth)\n\n def __calculate(self):\n # horakhun: The number of elapsed days since epoch plus days since New Year's Day (Thai: หรคุฌ)\n self.__horakhun = (self.__year * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1 + self.__days\n assert self.julianday > CS_JULIAN_DAY_OFFSET # check for pre-epoch dates\n\n # kammacapon: A quantity that gives the excess of solar days over whole solar days (Thai: กัมมัขผล)\n self.__kammacapon = TIME_UNITS_IN_1_DAY - (self.__year * DAYS_IN_800_YEARS + EPOCH_OFFSET) % TIME_UNITS_IN_1_DAY\n\n # uccapon: The measure of the position of the Moon's apogee. It increases by one unit a day to\n # a maximum of 3232 (Thai: อุจจพล)\n self.__uccapon = (self.__horakhun + UCCAPON_CONSTANT) % APOGEE_ROTATION_DAYS\n\n # avoman: The excess of lunar days over solar days in units of 1/692 of a lunar day modulus 692.\n # It increases by 11 units each solar day. 
It is used to determine when to add intercalary days\n # in the calendar (Thai: อวมาน)\n self.__avoman = (self.__horakhun * 11 + 650) % 692\n if self.__avoman == 0:\n self.__avoman = 692\n\n # masaken: Number of lunar months since the epoch (Thai: มาสเกฌฑ์)\n avoman_div = ((self.__horakhun + self.days) * 11 + 650) // 692\n self.__masaken = (avoman_div + self.__horakhun) // 30\n\n # tithi: a lunar day, equal to 1/30th of a synodic month (Thai: ดิถี)\n quot = (self.__horakhun * 11 + 650) // 692\n self.__tithi = (quot + self.__horakhun) % 30\n\n # self.avomanExtra = (self.horakhun * 11 + 650) % 692\n logging.debug(\"horakhun:%s kamma:%s quot:%s tt:%s\", self.__horakhun, self.__kammacapon, quot, self.__tithi)\n\n @staticmethod\n def calculate_year0(year: int):\n y = [\n LSYear(year - 2),\n LSYear(year - 1),\n LSYear(year),\n LSYear(year + 1),\n LSYear(year + 2),\n ]\n # logging.debug(\"[0] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n for i in (0, 1, 2, 3, 4):\n if y[2].tithi == 24 and y[3].tithi == 6:\n # where tithi of this year is 24 and next year is 6, set all years to C-type\n # adjust next_nyd weekday\n y[i].cal_type = \"C\"\n y[i].next_nyd = (y[i].next_nyd + 2) % 7\n # logging.debug(\"[1] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n # Adjust c-type years where a intercalary day and month coincide. This can't happen\n # in the Thai calendar (unlike the Burmese) so we decide if the intercalary day is moved\n # to the previous or next year. This is done by ensuring a correct sequence of weekdays\n # from one year to the next.\n for i in (1, 2, 3):\n if y[i].cal_type == \"c\":\n j = 1 if y[i].nyd == y[i-1].next_nyd else -1\n y[i+j].cal_type = \"B\"\n y[i+j].next_nyd = (y[i+j].next_nyd + 1) % 7\n # logging.debug(\"[2] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n for i in (1, 2, 3):\n if y[i-1].next_nyd != y[i].nyd and y[i].next_nyd != y[i+1].nyd:\n y[i].offset = True\n y[i].langsak += 1\n y[i].nyd = (y[i].nyd + 6) % 7\n y[i].next_nyd = (y[i].next_nyd + 6) % 7\n\n # housekeeping - elabal any remaining c-type years as C-type; add day count too\n for i in (0, 1, 2, 3, 4):\n if y[i].cal_type == \"c\":\n y[i].cal_type = \"C\"\n y[i].caldays = CAL_TYPE_DAY_COUNTS[y[i].cal_type]\n # logging.debug(\"[F] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n # Determine month/day of new year\n y[2].first_month = \"C\" # as per Eade, C=>Caitra, V=>Vaisakha\n y[2].first_day = y[2].langsak\n y[2].offset_days = y[2].langsak # no.days offset from Caitra 1st\n if y[2].offset_days < (6 + int(y[2].offset)):\n y[2].first_month = \"V\"\n y[2].first_day = y[2].offset_days\n y[2].offset_days += 29\n return y[2]\n\n @staticmethod\n def find_date(cal: str, days: int):\n \"\"\"\n Given a calendar type (A, B, C) and number of days since new years day,\n return the month and day component of a date, derived from lookup tables.\n \"\"\"\n logging.debug(\"cal:%s days:%s\", cal, days)\n vals = {\n \"A\": (\n (383, 16), (354, 15), (324, 12), (295, 11), (265, 10), (236, 9),\n (206, 8), (177, 7), (147, 6), (118, 5), (88, 4), (59, 3), (29, 2),\n ),\n \"B\": (\n (384, 16), (355, 15), (325, 12), (296, 11), (266, 10), (237, 9),\n (207, 8), (178, 7), (148, 6), (119, 5), (89, 4), (59, 3), (29, 2),\n ),\n \"C\": (\n (384, 15), (354, 12), (325, 11), (295, 10), (266, 9), (236, 8),\n (207, 7), (177, 6), (148, 5), (118, 14), (88, 13), (59, 3), (29, 2),\n ),\n }\n assert cal in vals.keys(), ValueError(\"Cal {} not found\".format(cal))\n\n for a, b in vals[cal]:\n if days > a:\n 
days -= a\n logging.debug(\"solution: (a:%s b:%s) month:%s day:%s\",\n a, b, LUNAR_MONTHS[b], days)\n month = LUNAR_MONTHS[b]\n break\n month = LUNAR_MONTHS[1]\n else:\n logging.debug(\"default: month:%s (%s) day:%s\", 1, LUNAR_MONTHS[1], days)\n return month, days\n\n @classmethod\n def today(cls):\n \"\"\"\n Return today as CS date.\n \"\"\"\n jd = julianday.today()\n logging.debug(\"jd:%s\", jd)\n return cls.fromjulianday(jd)\n\n @classmethod\n def fromyd(cls, year: int, days: int):\n \"\"\"\n Return a Chulasakarat object from a year and days since new years day.\n \"\"\"\n logging.debug(\"start: year:%s days:%s\", year, days)\n year0 = cls.calculate_year0(year)\n days_in_year = 365 + int(year0.leapday)\n while days > days_in_year: # zero-indexed\n year += 1\n days -= days_in_year\n year0 = cls.calculate_year0(year)\n days_in_year = 365 + int(year0.leapday)\n logging.debug(\"days >= %s: year:%s days:%s\", 364 + int(year0.leapday), year, days)\n\n # logging.debug(\"year0 langsak:%s offset_days:%s\", year0.langsak, year0.offset_days)\n month, day = cls.find_date(year0.cal_type, year0.offset_days + days)\n logging.debug(\"year:%s month:%s day:%s\", year, month, day)\n return cls(year, month, day)\n\n @classmethod\n def fromjulianday(cls, jd: int):\n \"\"\"\n Return a Chulasakarat object from a Julian Day Number.\n \"\"\"\n hk = jd - CS_JULIAN_DAY_OFFSET\n year = (hk * 800 - 373) // 292207\n if hk % 292207 == 95333:\n # Every 800 years (292207 days), on the last day of the solar leap\n # year coinciding with an adhkimas lunar year, this jd->year\n # formula will be off by one day pushing the year forward by one\n # and the days count to -1.\n year -= 1\n days = 365\n logging.debug(\"800 year kamma adjustment\")\n else:\n year0 = cls.calculate_year0(year)\n days = hk - year0.horakhun\n # logging.debug(\"kamma:%s\", year0.kammacapon)\n # logging.debug(\"jd:%s year:%s days:%s cal_type:%s hk0:%s\", jd, year, days, year0.cal_type, year0.horakhun)\n logging.debug(\"jd:%s year:%s days:%s\", jd, year, days)\n return cls.fromyd(year=year, days=days)\n\n from_julianday = fromjulianday\n\n @classmethod\n def fromtimestamp(cls, ts):\n \"\"\"\n Return a Chulasakarat object from a UNIX timestamp.\n \"\"\"\n jd = ts // (24 * 60 * 60) + CS_UNIX_EPOCH_OFFSET\n return cls.fromjulianday(jd)\n\n @property\n def julianday(self):\n \"\"\"\n Return the Julian Day Number of this CS date.\n \"\"\"\n return self.__horakhun + CS_JULIAN_DAY_OFFSET\n\n @property\n def horakhun(self):\n return self.__horakhun\n\n @property\n def kammacapon(self):\n return self.__kammacapon\n\n @property\n def masaken(self):\n return self.__masaken\n\n @property\n def uccapon(self):\n return self.__uccapon\n\n @property\n def avoman(self):\n return self.__avoman\n\n @property\n def tithi(self):\n return self.__tithi\n\n @property\n def year(self):\n return self.__year\n\n @property\n def month(self):\n if self.__month == 15 or self.__month == 16:\n return self.__month - 10\n return self.__month\n\n @property\n def month_raw(self):\n return self.__month\n\n @property\n def day(self):\n return self.__day\n\n @property\n def days(self):\n return self.__days\n\n @property\n def solar_leap_year(self):\n return self.__year0.leapday\n\n @property\n def leap_day(self):\n return self.__year0.cal_type == 'B'\n\n @property\n def leap_month(self):\n return self.__year0.cal_type == 'C'\n\n @property\n def days_in_year(self):\n if self.__year0.cal_type == \"A\":\n return 354\n elif self.__year0.cal_type == \"B\":\n return 355\n elif 
self.__year0.cal_type == \"C\":\n return 384\n\n def replace(self, year=None, month=None, day=None):\n logging.debug(\"year:%s month%s day:%s\", year, month, day)\n y = year if year else self.year\n m = month if month else self.month\n d = day if day else self.day\n logging.debug(\"year:%s month%s day:%s\", y, m, d)\n return CsDate(y, m, d)\n\n def csweekday(self):\n return self.__horakhun % 7\n\n def weekday(self):\n return self.csweekday() - 2\n\n def isoweekday(self):\n return self.csweekday() - 1\n\n @property\n def yearnaksatr(self):\n idx = (self.year + 11) % 12\n if idx == 0:\n idx = 12\n return \"ปี\" + YEAR_NAKSATR[idx]\n\n def csformat(self):\n phase = \"ขึ้น\" if self.day <= 15 else \"แรม\"\n day = self.day if self.day <= 15 else self.day - 15\n s = \"{:s} เดือน {:s} {:s} {:s} ค่ำ {:s} จ.ศ.{:s}\".format(\n WEEKDAYS[self.csweekday()],\n digit_arabic_to_thai(self.month),\n phase,\n digit_arabic_to_thai(day),\n self.yearnaksatr,\n digit_arabic_to_thai(self.year)\n )\n s = digit_arabic_to_thai(s)\n return s\n\n def csformatymd(self):\n \"\"\"\n Return string in YYYY-MM-DD format.\n \"\"\"\n return \"{:4d}-{:02d}-{:02d}\".format(self.year, self.month, self.day)\n\n @classmethod\n def fromcsformat(self, s):\n s = digit_thai_to_arabic(s)\n weekday, _, month, phase, day, _, _, year = s.split()\n year = int(year.replace(\"จ.ศ.\", \"\"))\n month = int(month)\n day = int(day)\n if phase == \"แรม\":\n day += 15\n return CsDate(year, month, day)\n\n def cscalendar(self):\n return CsCalendarDate(self.year, self.month, self.day)\n\n def __str__(self):\n return self.csformat()\n\n def __int__(self):\n \"\"\"Convert to int by returning the Julian Day Number.\"\"\"\n return self.julianday\n\n def _hashable(self):\n return (\n self.__year,\n self.__month,\n self.__day,\n self.__days,\n self.__horakhun,\n self.__kammacapon,\n self.__tithi,\n self.__year0.cal_type,\n )\n\n def __hash__(self): # pragma: no cover\n return hash(self._hashable())\n\n def __lt__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday < other.julianday\n elif isinstance(other, date):\n return self.julianday < julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __le__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday <= other.julianday\n elif isinstance(other, date):\n return self.julianday <= julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __eq__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday == other.julianday\n elif isinstance(other, date):\n return self.julianday == julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __ge__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday >= other.julianday\n elif isinstance(other, date):\n return self.julianday >= julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __gt__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday > other.julianday\n elif isinstance(other, date):\n return self.julianday > julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __add__(self, other):\n if isinstance(other, timedelta):\n return CsDate.fromjulianday(self.julianday + other.days)\n return NotImplemented\n\n __radd__ = __add__\n\n def __sub__(self, other):\n if isinstance(other, timedelta):\n return self + timedelta(-other.days)\n elif hasattr(other, \"julianday\"):\n return 
timedelta(days=self.julianday - other.julianday)\n elif isinstance(other, date):\n other_jd = julianday.to_julianday(other.year, other.month, other.day)\n return timedelta(days=self.julianday - other_jd)\n return NotImplemented\n\n def debug(self): # pragma: no cover\n return {\n \"cp\": self.__year0,\n \"horakhun\": self.__horakhun,\n \"kamma\": self.__kammacapon,\n # \"avomanExtra\": self.avomanExtra,\n \"tt\": self.__tithi,\n \"year\": self.__year,\n \"month\": self.__month,\n \"day\": self.__day,\n \"days\": self.__days,\n \"cal_type\": self.__year0.cal_type,\n \"month_style\": self.__month_style,\n \"year0.langsak\": self.__year0.langsak,\n \"year0.offset\": self.__year0.offset,\n }"
},
{
"identifier": "PakDate",
"path": "pythaidate/pakdate.py",
"snippet": "class PakDate:\n\n def __init__(self, jd=None, pakcode=None, date=None):\n # assert jd is not None or pakcode is not None or date is not None\n self.__julianday = None\n self.__horakhun = None\n self.__pakkhagen = None\n self.__cycle = None\n self.__data = [0, 0, 0, 0, 0, 0]\n self.__pos = [None, None, None, None, None, None]\n self.__pakabbr = None\n\n if jd:\n self.__convert_julianday(jd)\n\n elif pakcode:\n self.__convert_pakcode(pakcode)\n\n elif date:\n jd = julianday.date_to_julianday(date)\n self.__convert_julianday(jd)\n\n @classmethod\n def today(cls):\n \"\"\"Return today as Pak date.\"\"\"\n return cls(jd=julianday.today())\n\n @classmethod\n def fromjulianday(cls, jd):\n \"\"\"Class method for Julian Day Number conversion.\"\"\"\n return cls(jd=jd)\n\n # @classmethod\n # def frompakcode(cls, pakcode):\n # \"\"\"Return Pak object from format string (x-a:b:c:d:e:f).\"\"\"\n # return cls(pakcode=pakcode)\n\n def __convert_julianday(self, jd):\n \"\"\"Convert from Julian Day Number.\"\"\"\n def div(a, b):\n c = 0\n while True:\n if b >= a:\n return c + 1, a\n a -= b\n c += 1\n\n def _adjust(row, prefix, col):\n logging.debug(\"_adjust_1(%s, %s, %s)\", row, 1-prefix, col-1)\n if col > len(layout[row][1-prefix]):\n rtn = None\n else:\n rtn = layout[row][1-prefix][col-1]\n logging.debug(\"_adjust_1(%s, %s, %s) -> %s\", row, 1-prefix, col-1, rtn)\n return rtn\n\n self.__julianday = jd\n self.__horakhun = jd - PAK_JULIAN_DAY_OFFSET\n if self.__horakhun <= 0:\n raise ValueError(\"Invalid Pakkhakhananaa range.\")\n\n days = self.__horakhun % PAK_DAYS_IN_CYCLE\n if days == 0:\n days = PAK_DAYS_IN_CYCLE\n self.__cycle = math.ceil(self.__horakhun / PAK_DAYS_IN_CYCLE)\n\n # ปักขคณนา row\n self.__data[0], rem = div(days, 16168)\n self.__pos[0] = (0, self.__data[0]-1)\n mahachula = layout[0][0][self.__data[0]-1]\n logging.debug(\"0 data:%s, mc:%s\", self.__data, self.__pos)\n\n # สัมพยุหะ, พยุหะ, สมุหะ, วรรค rows\n for row, divisor in ((1, 1447), (2, 251), (3, 59), (4, 15)):\n self.__data[row], rem = div(rem, divisor)\n mahachula1 = _adjust(row, mahachula, self.__data[row])\n # logging.debug(\"L: row:%s div:%s -> d[r]:%s rem:%s | mc:%s mc1:%s\", row, divisor, self.__data[row], rem, mc, mc1)\n if mahachula1 is None:\n # the row position is too large - decrement it by one and add\n # the divisor back on to rem for the next iteration. 
Do the\n # adjustment again and it should be correct.\n self.__data[row] -= 1\n rem += divisor\n mahachula1 = _adjust(row, mahachula, self.__data[row])\n self.__pos[row] = (1-mahachula, self.__data[row]-1) # display_pattern[row][self.__mahachula[row]][mc-1]\n # logging.debug(\"L: %s data:%s, mc:%s\", row, self.__data, self.__pos)\n mahachula = mahachula1\n\n # วัน (ค่ำ)\n self.__data[5] = rem\n self.__pos[5] = (mahachula, self.__data[5]-1) # display_pattern[row][self.__mahachula[4]][mc-1]\n logging.debug(\"F: %s %s %s\", self.__cycle, self.__data, self.__pos)\n\n def __convert_pakcode(self, s):\n \"\"\"Convert a Pak string (x-a:b:c:d:e:f) to a state object.\"\"\"\n cyc, pak = s.split(\"-\")\n cyc = int(cyc)\n assert cyc > 0, ValueError(\"Invalid Pak string.\")\n a, b, c, d, e, f = map(int, pak.split(\":\"))\n jd = (e - 1) * 15 + f\n jd = (d - 1) * 59 + jd\n jd = (c - 1) * 251 + jd\n jd = (b - 1) * 1447 + jd\n jd = (a - 1) * 16168 + jd\n jd += (cyc - 1) * PAK_DAYS_IN_CYCLE\n jd += 2355147\n self.__convert_julianday(jd)\n\n @property\n def julianday(self):\n # if self.__julianday is None:\n # self.__julianday = self.horakhun + PAK_JULIAN_DAY_OFFSET\n return self.__julianday\n\n @property\n def horakhun(self):\n \"\"\"\n Days since the Pakkhakhananaa epoch (1736-01-28 A.D., 2279-01-28 B.E.).(Thai: หรคุฌ)\n \"\"\"\n # if self.__horakhun is None:\n # self.__horakhun = (self.__data[0] - 1) * 16168 + \\\n # (self.__data[1] - 1) * 1447 + \\\n # (self.__data[2] - 1) * 251 + \\\n # (self.__data[3] - 1) * 59 + \\\n # (self.__data[4] - 1) * 15 + \\\n # self.__data[5]\n return self.__horakhun\n\n @property\n def pakkhagen(self):\n \"\"\"\n Number of lunar (14/15) day weeks since the epoch. (Thai: ปักขเกณฑ์)\n \"\"\"\n if self.__pakkhagen is None:\n self.__pakkhagen = (self.__cycle - 1) * 19612 + \\\n (self.__data[0] - 1) * 1095 + \\\n (self.__data[1] - 1) * 98 + \\\n (self.__data[2] - 1) * 17 + \\\n (self.__data[3] - 1) * 4 + \\\n self.__data[4]\n return self.__pakkhagen\n\n @property\n def pakcode(self):\n return \"{:d}-{:d}:{:d}:{:d}:{:d}:{:d}:{:d}\".format(self.__cycle, *self.__data)\n\n @property\n def pakabbr(self):\n \"\"\"\n Returns a string in \"เลขใช้บอกปักข์\" format\n \"\"\"\n def _digit1(d):\n return d // 10 if d > 9 else d\n\n def _digit2(d):\n return d % 10 if d > 9 else \" \"\n\n def _ctrans(c):\n return c if c == \" \" else \"กขฅจหฉษฐฬฮ\"[c-1]\n\n def _ntrans(c):\n return c if c == \" \" else \"๐๑๒๓๔๕๖๗๘๙\"[c]\n\n if self.__pakabbr is None:\n s1, s2 = [], []\n for i in range(5):\n v = self.__data[i]\n mahachula, col = self.__pos[i]\n if layout[i][mahachula][col] == 0:\n s1.append(_ctrans(_digit1(v)))\n s2.append(_ctrans(_digit2(v)))\n else:\n s1.append(_ntrans(_digit1(v)))\n s2.append(_ntrans(_digit2(v)))\n self.__pakabbr = \"\".join(s1) + \"\\n\" + \"\".join(s2)\n return self.__pakabbr.rstrip()\n\n @property\n def iswaxing(self):\n return self.pakkhagen % 2 == 0\n\n @property\n def iswaning(self):\n return self.pakkhagen % 2 == 1\n\n @property\n def iswanphra(self):\n f_days = 15 if self.__pos[5][0] else 14\n d = self.__data[5]\n val = d / f_days\n logging.debug(\"f_days:%d d:%d val:%d t1:%s t2:%s t3:%s\",\n f_days, d, val,\n val == 4/7, val == 8/15, val == 1)\n return val == 4/7 or val == 8/15 or val == 1\n\n issabbath = iswanphra\n\n def weekday(self):\n return self.__horakhun % 7 - 1\n\n def isoweekday(self):\n return self.__horakhun % 7\n\n def debug(self):\n return {\n \"pakcode\": self.pakcode,\n \"jd\": self.__julianday,\n \"hk\": self.__horakhun,\n \"pakkhagen\": 
self.__pakkhagen,\n }\n\n def pakboard(self, fh=None):\n def _display():\n def _stringify(b):\n max_prefix_len = max(map(lambda x: thai_string_width(x[0]), board))\n for r in board:\n numspaces = max_prefix_len - thai_string_width(r[0])\n r[0] = r[0] + \" \" * numspaces\n return max_prefix_len\n\n max_prefix_len = _stringify(board)\n blank = \" \" * (max_prefix_len - 1)\n headings = digit_arabic_to_thai(\" \".join([\"{:>2d}\".format(i) for i in range(1, 19)]))\n print(blank + \" \" + headings, file=fh)\n for i, r in enumerate(board):\n content = []\n for c in r[1:]:\n fmt = \"\\033[;7m{:>2s}\\033[0;0m\" if c & 0x80 else \"{:>2s}\"\n if i < 9:\n c = \"ม\" if c & 0x7F == 1 else \"จ\"\n else:\n c = digit_arabic_to_thai(str(c & 0x7f))\n content.append(fmt.format(c))\n print(\"{:s} {:s}\".format(r[0], \" \".join(content)), file=fh)\n print(\" \".join([\n blank, \" \",\n \"รอบที่\", digit_arabic_to_thai(self.__cycle), \" \",\n \"หรคุณปักขคณนา\", digit_arabic_to_thai(self.horakhun), \" \",\n \"ปักขเกณฑ์\", digit_arabic_to_thai(self.pakkhagen)\n ]), file=fh)\n\n if fh is None:\n fh = sys.stdout\n\n # setup board\n board = [\n [\"ปักขคณนา\", *layout[0][0]],\n [\"มหาสัมพยุหะ\", *layout[1][0]],\n [\"จุลสัมพยุหะ\", *layout[1][1]],\n [\"มหาพยุหะ\", *layout[2][0]],\n [\"จุลพยุหะ\", *layout[2][1]],\n [\"มหาสมุหะ\", *layout[3][0]],\n [\"จุลสมุหะ\", *layout[3][1]],\n [\"มหาวรรค\", *layout[4][0]],\n [\"จุลวรรค\", *layout[4][1]],\n [\"มหาปักข์\", *list(range(1,16))],\n [\"จุลปักข์\", *list(range(1,15))],\n ]\n\n # highlight row items\n for i in range(6):\n mahachula, col = self.__pos[i]\n row = 0 if i == 0 else i * 2 - 1 + mahachula\n board[row][col+1] += 0x80 # Set MSB to 1 as a \"selected\" flag\n _display()\n\n def __str__(self):\n # มหาสัมพยุหะ 6 จุลพยุหะ 5 จุลสมุหะ 6 จุลวรรค 2 จุลปักข์ 4 ขึ้น 3 ค่ำ (ปักข์ขาด / ปักข์ถ้วน)\n output = []\n next_row = 0\n for i, label in enumerate((\"สัมพยุหะ\", \"พยุหะ\", \"สมุหะ\", \"วรรค\", \"ปักข์\")):\n val = layout[i][next_row][self.__data[i]-1]\n output += [(\"มหา\" if val else \"จุล\") + label, str(self.__data[i])]\n next_row = 1 - val\n output += [\"ขึ้น\" if self.iswaxing else \"แรม\",\n str(self.__data[5]),\n \"ค่ำ\",\n \"(\" + (\"ปักข์ขาด\" if next_row else \"ปักข์ถ้วน\") + \")\"]\n return digit_arabic_to_thai(\" \".join(output))\n\n def __lt__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday < other.julianday\n elif isinstance(other, date):\n return self.julianday < julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __le__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday <= other.julianday\n elif isinstance(other, date):\n return self.julianday <= julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __eq__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday == other.julianday\n elif isinstance(other, date):\n return self.julianday == julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __ge__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday >= other.julianday\n elif isinstance(other, date):\n return self.julianday >= julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __gt__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday > other.julianday\n elif isinstance(other, date):\n return self.julianday > julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def 
__add__(self, other):\n if isinstance(other, timedelta):\n return PakDate.fromjulianday(self.julianday + other.days)\n return NotImplemented\n\n __radd__ = __add__\n\n def __sub__(self, other):\n if isinstance(other, timedelta):\n return self + timedelta(-other.days)\n elif hasattr(other, \"julianday\"):\n return timedelta(days=self.julianday - other.julianday)\n elif isinstance(other, date):\n other_jd = julianday.to_julianday(other.year, other.month, other.day)\n return timedelta(days=self.julianday - other_jd)\n return NotImplemented\n\n def debug_reset(self): # pragma: no cover\n self.__horakhun = None\n self.__julianday = None\n self.__pakkhagen = None"
},
{
"identifier": "PAK_JULIAN_DAY_OFFSET",
"path": "pythaidate/constants.py",
"snippet": "PAK_JULIAN_DAY_OFFSET = 2355147"
}
] | from datetime import date, timedelta
from pythaidate import PakDate, CsDate, julianday
from pythaidate.constants import PAK_JULIAN_DAY_OFFSET
import json
import unittest
import os
import pathlib
import random
import logging | 9,969 |
RUN_PERCENT = 10
if os.environ.get("RUN_PERCENT"):
RUN_PERCENT = int(os.environ.get("RUN_PERCENT"))
if RUN_PERCENT > 100:
RUN_PERCENT = 100
RUN_PERCENT /= 100
for datafile in ("pak.data", "pak.min.data"):
datafile = os.path.join(pathlib.Path(__file__).parent.resolve(), "data", datafile)
if os.path.exists(datafile):
break
else:
raise FileNotFoundError("Pak data file not found.")
random.seed()
def read_test_date(sample=1, minjd=None):
with open(datafile) as fh:
for ln in fh:
if random.random() > sample:
continue
i = ln.rstrip().split(" ")
y, m, d = i[4].split("-")
e = {
"pakcode": i[0],
"jd": int(i[1][3:]),
"hk": int(i[2][3:]),
"masak": int(i[3][6:]),
"year": int(y),
"month": int(m),
"day": int(d),
"iswanphra": i[5] == 't',
}
if minjd and e["jd"] < minjd:
continue
yield e
class Test_PakDate(unittest.TestCase):
def test_jd_pre_epoch(self):
with self.assertRaises(ValueError):
# pre-epoch jd
|
RUN_PERCENT = 10
if os.environ.get("RUN_PERCENT"):
RUN_PERCENT = int(os.environ.get("RUN_PERCENT"))
if RUN_PERCENT > 100:
RUN_PERCENT = 100
RUN_PERCENT /= 100
for datafile in ("pak.data", "pak.min.data"):
datafile = os.path.join(pathlib.Path(__file__).parent.resolve(), "data", datafile)
if os.path.exists(datafile):
break
else:
raise FileNotFoundError("Pak data file not found.")
random.seed()
def read_test_date(sample=1, minjd=None):
with open(datafile) as fh:
for ln in fh:
if random.random() > sample:
continue
i = ln.rstrip().split(" ")
y, m, d = i[4].split("-")
e = {
"pakcode": i[0],
"jd": int(i[1][3:]),
"hk": int(i[2][3:]),
"masak": int(i[3][6:]),
"year": int(y),
"month": int(m),
"day": int(d),
"iswanphra": i[5] == 't',
}
if minjd and e["jd"] < minjd:
continue
yield e
class Test_PakDate(unittest.TestCase):
def test_jd_pre_epoch(self):
with self.assertRaises(ValueError):
# pre-epoch jd | p = PakDate(jd=PAK_JULIAN_DAY_OFFSET - 5) | 2 | 2023-11-18 21:14:01+00:00 | 12k |
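Illustrative sketch, not part of the dataset row above: the cropped test body and the row's recorded target line joined back together. The PakDate context snippet explains why the completed test passes: __convert_julianday computes horakhun = jd - PAK_JULIAN_DAY_OFFSET and raises ValueError when horakhun is not positive, so a Julian Day Number five days before the epoch triggers the expected exception. All names come from the row itself; nothing here is new API.

# Illustrative reconstruction (not part of the dataset row): cropped test body
# plus the row's recorded target line. PakDate raises ValueError for Julian Day
# Numbers at or before the Pakkhakhananaa epoch (horakhun <= 0).
import unittest

from pythaidate import PakDate
from pythaidate.constants import PAK_JULIAN_DAY_OFFSET


class Test_PakDate(unittest.TestCase):

    def test_jd_pre_epoch(self):
        with self.assertRaises(ValueError):
            # pre-epoch jd
            p = PakDate(jd=PAK_JULIAN_DAY_OFFSET - 5)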
CmosWolf1/Code_implementation_for_paper_SKZC | demo.py | [
{
"identifier": "VisualizationDemo",
"path": "diffusiondet/predictor.py",
"snippet": "class VisualizationDemo(object):\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):\n \"\"\"\n Args:\n cfg (CfgNode):\n instance_mode (ColorMode):\n parallel (bool): whether to run the model in different processes from visualization.\n Useful since the visualization logic can be slow.\n \"\"\"\n self.metadata = MetadataCatalog.get(\n cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else \"__unused\"\n )\n self.cpu_device = torch.device(\"cpu\")\n self.instance_mode = instance_mode\n\n self.parallel = parallel\n if parallel:\n num_gpu = torch.cuda.device_count()\n self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)\n else:\n self.predictor = DefaultPredictor(cfg)\n \n self.threshold = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST # workaround\n\n def run_on_image(self, image):\n \"\"\"\n Args:\n image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n\n Returns:\n predictions (dict): the output of the model.\n vis_output (VisImage): the visualized image output.\n \"\"\"\n vis_output = None\n predictions = self.predictor(image)\n # Filter\n instances = predictions['instances']\n new_instances = instances[instances.scores > self.threshold]\n predictions = {'instances': new_instances}\n # Convert image from OpenCV BGR format to Matplotlib RGB format.\n image = image[:, :, ::-1]\n visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n if \"panoptic_seg\" in predictions:\n panoptic_seg, segments_info = predictions[\"panoptic_seg\"]\n vis_output = visualizer.draw_panoptic_seg_predictions(\n panoptic_seg.to(self.cpu_device), segments_info\n )\n else:\n if \"sem_seg\" in predictions:\n vis_output = visualizer.draw_sem_seg(\n predictions[\"sem_seg\"].argmax(dim=0).to(self.cpu_device)\n )\n if \"instances\" in predictions:\n instances = predictions[\"instances\"].to(self.cpu_device)\n vis_output = visualizer.draw_instance_predictions(predictions=instances)\n\n return predictions, vis_output\n\n def _frame_from_video(self, video):\n while video.isOpened():\n success, frame = video.read()\n if success:\n yield frame\n else:\n break\n\n def run_on_video(self, video):\n \"\"\"\n Visualizes predictions on frames of the input video.\n\n Args:\n video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be\n either a webcam or a video file.\n\n Yields:\n ndarray: BGR visualizations of each video frame.\n \"\"\"\n video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)\n\n def process_predictions(frame, predictions):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n if \"panoptic_seg\" in predictions:\n panoptic_seg, segments_info = predictions[\"panoptic_seg\"]\n vis_frame = video_visualizer.draw_panoptic_seg_predictions(\n frame, panoptic_seg.to(self.cpu_device), segments_info\n )\n elif \"instances\" in predictions:\n predictions = predictions[\"instances\"].to(self.cpu_device)\n vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)\n elif \"sem_seg\" in predictions:\n vis_frame = video_visualizer.draw_sem_seg(\n frame, predictions[\"sem_seg\"].argmax(dim=0).to(self.cpu_device)\n )\n\n # Converts Matplotlib RGB format to OpenCV BGR format\n vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)\n return vis_frame\n\n frame_gen = self._frame_from_video(video)\n if self.parallel:\n buffer_size = self.predictor.default_buffer_size\n\n frame_data = deque()\n\n for cnt, frame in enumerate(frame_gen):\n frame_data.append(frame)\n 
self.predictor.put(frame)\n\n if cnt >= buffer_size:\n frame = frame_data.popleft()\n predictions = self.predictor.get()\n yield process_predictions(frame, predictions)\n\n while len(frame_data):\n frame = frame_data.popleft()\n predictions = self.predictor.get()\n yield process_predictions(frame, predictions)\n else:\n for frame in frame_gen:\n yield process_predictions(frame, self.predictor(frame))"
},
{
"identifier": "add_diffusiondet_config",
"path": "diffusiondet/config.py",
"snippet": "def add_diffusiondet_config(cfg):\n \"\"\"\n Add config for DiffusionDet\n \"\"\"\n cfg.MODEL.DiffusionDet = CN()\n cfg.MODEL.DiffusionDet.NUM_CLASSES = 80\n cfg.MODEL.DiffusionDet.NUM_PROPOSALS = 300\n\n # RCNN Head.\n cfg.MODEL.DiffusionDet.NHEADS = 8\n cfg.MODEL.DiffusionDet.DROPOUT = 0.0\n cfg.MODEL.DiffusionDet.DIM_FEEDFORWARD = 2048\n cfg.MODEL.DiffusionDet.ACTIVATION = 'relu'\n cfg.MODEL.DiffusionDet.HIDDEN_DIM = 256\n cfg.MODEL.DiffusionDet.NUM_CLS = 1\n cfg.MODEL.DiffusionDet.NUM_REG = 3\n cfg.MODEL.DiffusionDet.NUM_HEADS = 6\n\n # Dynamic Conv.\n cfg.MODEL.DiffusionDet.NUM_DYNAMIC = 2\n cfg.MODEL.DiffusionDet.DIM_DYNAMIC = 64\n\n # Loss.\n cfg.MODEL.DiffusionDet.CLASS_WEIGHT = 2.0\n cfg.MODEL.DiffusionDet.GIOU_WEIGHT = 2.0\n cfg.MODEL.DiffusionDet.L1_WEIGHT = 5.0\n cfg.MODEL.DiffusionDet.DEEP_SUPERVISION = True\n cfg.MODEL.DiffusionDet.NO_OBJECT_WEIGHT = 0.1\n\n # Focal Loss.\n cfg.MODEL.DiffusionDet.USE_FOCAL = True\n cfg.MODEL.DiffusionDet.USE_FED_LOSS = False\n cfg.MODEL.DiffusionDet.ALPHA = 0.25\n cfg.MODEL.DiffusionDet.GAMMA = 2.0\n cfg.MODEL.DiffusionDet.PRIOR_PROB = 0.01\n\n # Dynamic K\n cfg.MODEL.DiffusionDet.OTA_K = 5\n\n # Diffusion\n cfg.MODEL.DiffusionDet.SNR_SCALE = 2.0\n cfg.MODEL.DiffusionDet.SAMPLE_STEP = 1\n\n # Inference\n cfg.MODEL.DiffusionDet.USE_NMS = True\n\n # Swin Backbones\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.SIZE = 'B' # 'T', 'S', 'B'\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n cfg.MODEL.SWIN.OUT_FEATURES = (0, 1, 2, 3) # modify\n\n # Optimizer.\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 1.0\n\n # TTA.\n cfg.TEST.AUG.MIN_SIZES = (400, 500, 600, 640, 700, 900, 1000, 1100, 1200, 1300, 1400, 1800, 800)\n cfg.TEST.AUG.CVPODS_TTA = True\n cfg.TEST.AUG.SCALE_FILTER = True\n cfg.TEST.AUG.SCALE_RANGES = ([96, 10000], [96, 10000], \n [64, 10000], [64, 10000],\n [64, 10000], [0, 10000],\n [0, 10000], [0, 256],\n [0, 256], [0, 192],\n [0, 192], [0, 96],\n [0, 10000])"
},
{
"identifier": "DiffusionDetDatasetMapper",
"path": "diffusiondet/dataset_mapper.py",
"snippet": "class DiffusionDetDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by DiffusionDet.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n def __init__(self, cfg, is_train=True):\n if cfg.INPUT.CROP.ENABLED and is_train:\n self.crop_gen = [\n T.ResizeShortestEdge([400, 500, 600], sample_style=\"choice\"),\n T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),\n ]\n else:\n self.crop_gen = None\n\n self.tfm_gens = build_transform_gen(cfg, is_train)\n logging.getLogger(__name__).info(\n \"Full TransformGens used in training: {}, crop: {}\".format(str(self.tfm_gens), str(self.crop_gen))\n )\n\n self.img_format = cfg.INPUT.FORMAT\n self.is_train = is_train\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if self.crop_gen is None:\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n else:\n if np.random.rand() > 0.5:\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n else:\n image, transforms = T.apply_transform_gens(\n self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image\n )\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n instances = utils.annotations_to_instances(annos, image_shape)\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n return dataset_dict"
},
{
"identifier": "DiffusionDetWithTTA",
"path": "diffusiondet/test_time_augmentation.py",
"snippet": "class DiffusionDetWithTTA(GeneralizedRCNNWithTTA):\n \"\"\"\n A DiffusionDet with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`DiffusionDet.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=3):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (DiffusionDet): a DiffusionDet to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n # fix the issue: cannot assign module before Module.__init__() call\n nn.Module.__init__(self)\n if isinstance(model, DistributedDataParallel):\n model = model.module\n\n self.cfg = cfg.clone()\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n # cvpods tta.\n self.enable_cvpods_tta = cfg.TEST.AUG.CVPODS_TTA\n self.enable_scale_filter = cfg.TEST.AUG.SCALE_FILTER\n self.scale_ranges = cfg.TEST.AUG.SCALE_RANGES\n self.max_detection = cfg.MODEL.DiffusionDet.NUM_PROPOSALS\n\n def _batch_inference(self, batched_inputs, detected_instances=None):\n \"\"\"\n Execute inference on a list of inputs,\n using batch size = self.batch_size, instead of the length of the list.\n\n Inputs & outputs have the same format as :meth:`DiffusionDet.forward`\n \"\"\"\n if detected_instances is None:\n detected_instances = [None] * len(batched_inputs)\n\n factors = 2 if self.tta_mapper.flip else 1\n if self.enable_scale_filter:\n assert len(batched_inputs) == len(self.scale_ranges) * factors\n\n outputs = []\n inputs, instances = [], []\n for idx, input, instance in zip(count(), batched_inputs, detected_instances):\n inputs.append(input)\n instances.append(instance)\n if self.enable_cvpods_tta:\n output = self.model.forward(inputs, do_postprocess=False)[0]\n if self.enable_scale_filter:\n pred_boxes = output.get(\"pred_boxes\")\n keep = self.filter_boxes(pred_boxes.tensor, *self.scale_ranges[idx // factors])\n output = Instances(\n image_size=output.image_size,\n pred_boxes=Boxes(pred_boxes.tensor[keep]),\n pred_classes=output.pred_classes[keep],\n scores=output.scores[keep])\n outputs.extend([output])\n else:\n\n if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:\n outputs.extend(\n self.model.forward(\n inputs,\n do_postprocess=False,\n )\n )\n inputs, instances = [], []\n return outputs\n\n @staticmethod\n def filter_boxes(boxes, min_scale, max_scale):\n \"\"\"\n boxes: (N, 4) shape\n \"\"\"\n # assert boxes.mode == \"xyxy\"\n w = boxes[:, 2] - boxes[:, 0]\n h = boxes[:, 3] - boxes[:, 1]\n keep = (w * h > min_scale * min_scale) & (w * h < max_scale * max_scale)\n return keep\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n # Detect boxes from all augmented versions\n all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms)\n # merge all detected boxes to obtain final predictions for boxes\n if self.enable_cvpods_tta:\n merged_instances = self._merge_detections_cvpods_tta(all_boxes, all_scores, all_classes, orig_shape)\n else:\n merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, 
orig_shape)\n\n return {\"instances\": merged_instances}\n\n def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw):\n # select from the union of all results\n num_boxes = len(all_boxes)\n num_classes = self.cfg.MODEL.DiffusionDet.NUM_CLASSES\n # +1 because fast_rcnn_inference expects background scores as well\n all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device)\n for idx, cls, score in zip(count(), all_classes, all_scores):\n all_scores_2d[idx, cls] = score\n\n merged_instances, _ = fast_rcnn_inference_single_image(\n all_boxes,\n all_scores_2d,\n shape_hw,\n 1e-8,\n self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,\n self.cfg.TEST.DETECTIONS_PER_IMAGE,\n )\n\n return merged_instances\n\n def _merge_detections_cvpods_tta(self, all_boxes, all_scores, all_classes, shape_hw):\n all_scores = torch.tensor(all_scores).to(all_boxes.device)\n all_classes = torch.tensor(all_classes).to(all_boxes.device)\n\n all_boxes, all_scores, all_classes = self.merge_result_from_multi_scales(\n all_boxes, all_scores, all_classes,\n nms_type=\"soft_vote\", vote_thresh=0.65,\n max_detection=self.max_detection\n )\n\n all_boxes = Boxes(all_boxes)\n all_boxes.clip(shape_hw)\n\n result = Instances(shape_hw)\n result.pred_boxes = all_boxes\n result.scores = all_scores\n result.pred_classes = all_classes.long()\n return result\n\n def merge_result_from_multi_scales(\n self, boxes, scores, labels, nms_type=\"soft-vote\", vote_thresh=0.65, max_detection=100\n ):\n boxes, scores, labels = self.batched_vote_nms(\n boxes, scores, labels, nms_type, vote_thresh\n )\n\n number_of_detections = boxes.shape[0]\n # Limit to max_per_image detections **over all classes**\n if number_of_detections > max_detection > 0:\n boxes = boxes[:max_detection]\n scores = scores[:max_detection]\n labels = labels[:max_detection]\n\n return boxes, scores, labels\n\n def batched_vote_nms(self, boxes, scores, labels, vote_type, vote_thresh=0.65):\n # apply per class level nms, add max_coordinates on boxes first, then remove it.\n labels = labels.float()\n max_coordinates = boxes.max() + 1\n offsets = labels.reshape(-1, 1) * max_coordinates\n boxes = boxes + offsets\n\n boxes, scores, labels = self.bbox_vote(boxes, scores, labels, vote_thresh, vote_type)\n boxes -= labels.reshape(-1, 1) * max_coordinates\n\n return boxes, scores, labels\n\n def bbox_vote(self, boxes, scores, labels, vote_thresh, vote_type=\"softvote\"):\n assert boxes.shape[0] == scores.shape[0] == labels.shape[0]\n det = torch.cat((boxes, scores.reshape(-1, 1), labels.reshape(-1, 1)), dim=1)\n\n vote_results = torch.zeros(0, 6, device=det.device)\n if det.numel() == 0:\n return vote_results[:, :4], vote_results[:, 4], vote_results[:, 5]\n\n order = scores.argsort(descending=True)\n det = det[order]\n\n while det.shape[0] > 0:\n # IOU\n area = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])\n xx1 = torch.max(det[0, 0], det[:, 0])\n yy1 = torch.max(det[0, 1], det[:, 1])\n xx2 = torch.min(det[0, 2], det[:, 2])\n yy2 = torch.min(det[0, 3], det[:, 3])\n w = torch.clamp(xx2 - xx1, min=0.)\n h = torch.clamp(yy2 - yy1, min=0.)\n inter = w * h\n iou = inter / (area[0] + area[:] - inter)\n\n # get needed merge det and delete these det\n merge_index = torch.where(iou >= vote_thresh)[0]\n vote_det = det[merge_index, :]\n det = det[iou < vote_thresh]\n\n if merge_index.shape[0] <= 1:\n vote_results = torch.cat((vote_results, vote_det), dim=0)\n else:\n if vote_type == \"soft_vote\":\n vote_det_iou = iou[merge_index]\n det_accu_sum = 
self.get_soft_dets_sum(vote_det, vote_det_iou)\n elif vote_type == \"vote\":\n det_accu_sum = self.get_dets_sum(vote_det)\n vote_results = torch.cat((vote_results, det_accu_sum), dim=0)\n\n order = vote_results[:, 4].argsort(descending=True)\n vote_results = vote_results[order, :]\n\n return vote_results[:, :4], vote_results[:, 4], vote_results[:, 5]\n\n @staticmethod\n def get_dets_sum(vote_det):\n vote_det[:, :4] *= vote_det[:, 4:5].repeat(1, 4)\n max_score = vote_det[:, 4].max()\n det_accu_sum = torch.zeros((1, 6), device=vote_det.device)\n det_accu_sum[:, :4] = torch.sum(vote_det[:, :4], dim=0) / torch.sum(vote_det[:, 4])\n det_accu_sum[:, 4] = max_score\n det_accu_sum[:, 5] = vote_det[0, 5]\n return det_accu_sum\n\n @staticmethod\n def get_soft_dets_sum(vote_det, vote_det_iou):\n soft_vote_det = vote_det.detach().clone()\n soft_vote_det[:, 4] *= (1 - vote_det_iou)\n\n INFERENCE_TH = 0.05\n soft_index = torch.where(soft_vote_det[:, 4] >= INFERENCE_TH)[0]\n soft_vote_det = soft_vote_det[soft_index, :]\n\n vote_det[:, :4] *= vote_det[:, 4:5].repeat(1, 4)\n max_score = vote_det[:, 4].max()\n det_accu_sum = torch.zeros((1, 6), device=vote_det.device)\n det_accu_sum[:, :4] = torch.sum(vote_det[:, :4], dim=0) / torch.sum(vote_det[:, 4])\n det_accu_sum[:, 4] = max_score\n det_accu_sum[:, 5] = vote_det[0, 5]\n\n if soft_vote_det.shape[0] > 0:\n det_accu_sum = torch.cat((det_accu_sum, soft_vote_det), dim=0)\n return det_accu_sum"
},
{
"identifier": "add_model_ema_configs",
"path": "diffusiondet/util/model_ema.py",
"snippet": "def add_model_ema_configs(_C):\n _C.MODEL_EMA = type(_C)()\n _C.MODEL_EMA.ENABLED = False\n _C.MODEL_EMA.DECAY = 0.999\n # use the same as MODEL.DEVICE when empty\n _C.MODEL_EMA.DEVICE = \"\"\n # When True, loading the ema weight to the model when eval_only=True in build_model()\n _C.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY = False\n # when True, use YOLOX EMA: https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/utils/ema.py#L22\n _C.MODEL_EMA.YOLOX = False"
},
{
"identifier": "may_build_model_ema",
"path": "diffusiondet/util/model_ema.py",
"snippet": "def may_build_model_ema(cfg, model):\n if not cfg.MODEL_EMA.ENABLED:\n return\n model = _remove_ddp(model)\n assert not hasattr(\n model, \"ema_state\"\n ), \"Name `ema_state` is reserved for model ema.\"\n model.ema_state = EMAState()\n logger.info(\"Using Model EMA.\")"
},
{
"identifier": "may_get_ema_checkpointer",
"path": "diffusiondet/util/model_ema.py",
"snippet": "def may_get_ema_checkpointer(cfg, model):\n if not cfg.MODEL_EMA.ENABLED:\n return {}\n model = _remove_ddp(model)\n return {\"ema_state\": model.ema_state}"
},
{
"identifier": "EMAHook",
"path": "diffusiondet/util/model_ema.py",
"snippet": "class EMAHook(HookBase):\n def __init__(self, cfg, model):\n model = _remove_ddp(model)\n assert cfg.MODEL_EMA.ENABLED\n assert hasattr(\n model, \"ema_state\"\n ), \"Call `may_build_model_ema` first to initilaize the model ema\"\n self.model = model\n self.ema = self.model.ema_state\n self.device = cfg.MODEL_EMA.DEVICE or cfg.MODEL.DEVICE\n self.ema_updater = EMAUpdater(\n self.model.ema_state, decay=cfg.MODEL_EMA.DECAY, device=self.device, yolox=cfg.MODEL_EMA.YOLOX\n )\n\n def before_train(self):\n if self.ema.has_inited():\n self.ema.to(self.device)\n else:\n self.ema_updater.init_state(self.model)\n\n def after_train(self):\n pass\n\n def before_step(self):\n pass\n\n def after_step(self):\n if not self.model.train:\n return\n self.ema_updater.update(self.model)"
},
{
"identifier": "apply_model_ema_and_restore",
"path": "diffusiondet/util/model_ema.py",
"snippet": "@contextmanager\ndef apply_model_ema_and_restore(model, state=None):\n \"\"\"Apply ema stored in `model` to model and returns a function to restore\n the weights are applied\n \"\"\"\n model = _remove_ddp(model)\n\n if state is None:\n state = get_model_ema_state(model)\n\n old_state = EMAState.FromModel(model, state.device)\n state.apply_to(model)\n yield old_state\n old_state.apply_to(model)"
},
{
"identifier": "EMADetectionCheckpointer",
"path": "diffusiondet/util/model_ema.py",
"snippet": "class EMADetectionCheckpointer(DetectionCheckpointer):\n def resume_or_load(self, path: str, *, resume: bool = True) -> Dict[str, Any]:\n \"\"\"\n If `resume` is True, this method attempts to resume from the last\n checkpoint, if exists. Otherwise, load checkpoint from the given path.\n This is useful when restarting an interrupted training job.\n\n Args:\n path (str): path to the checkpoint.\n resume (bool): if True, resume from the last checkpoint if it exists\n and load the model together with all the checkpointables. Otherwise\n only load the model without loading any checkpointables.\n\n Returns:\n same as :meth:`load`.\n \"\"\"\n if resume and self.has_checkpoint():\n path = self.get_checkpoint_file()\n return self.load(path)\n else:\n # workaround `self.load`\n return self.load(path, checkpointables=None) # modify"
}
] | import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from diffusiondet.predictor import VisualizationDemo
from diffusiondet import DiffusionDetDatasetMapper, add_diffusiondet_config, DiffusionDetWithTTA
from diffusiondet.util.model_ema import add_model_ema_configs, may_build_model_ema, may_get_ema_checkpointer, EMAHook, \
apply_model_ema_and_restore, EMADetectionCheckpointer | 7,250 | # Copyright (c) Facebook, Inc. and its affiliates.
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
# To use demo for Panoptic-DeepLab, please uncomment the following two lines.
# from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa
# add_panoptic_deeplab_config(cfg)
add_diffusiondet_config(cfg)
add_model_ema_configs(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
| # Copyright (c) Facebook, Inc. and its affiliates.
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
# To use demo for Panoptic-DeepLab, please uncomment the following two lines.
# from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa
# add_panoptic_deeplab_config(cfg)
add_diffusiondet_config(cfg)
add_model_ema_configs(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
| demo = VisualizationDemo(cfg) | 0 | 2023-11-17 02:37:37+00:00 | 12k |
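Illustrative sketch, not part of the dataset row above: the tail of the demo.py main block with the row's recorded target line appended. The target line instantiates VisualizationDemo, which is the first context snippet of this row (diffusiondet/predictor.py), using the frozen config returned by setup_cfg. Every name used below (mp, get_parser, setup_logger, setup_cfg, VisualizationDemo) is defined in the row's code or context snippets; nothing here is new API.

# Illustrative reconstruction (not part of the dataset row): end of the
# "__main__" block with the row's recorded target line appended.
if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    setup_logger(name="fvcore")
    logger = setup_logger()
    logger.info("Arguments: " + str(args))
    cfg = setup_cfg(args)

    demo = VisualizationDemo(cfg)  # the row's target completion line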
fg320/DEASC | examples/08_3x3_farm_wso_TURBO.py | [
{
"identifier": "WfModel",
"path": "deasc/wf_model.py",
"snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by pointing towards an input file.\n (FLORIS interface object).\n\n Args\n ----\n input file:(FLORIS .json input file).\n \"\"\"\n # Read and initialize input file\n self.input_file = input_file\n self.interface = floris_input_handler(self.input_file, path)\n\n # Assign wind farm model proporties\n self.D, self.H_hub, self.n_turbs = floris_properties(self)\n\n def set_aligned_layout(self, n_row, n_col, spac_x, spac_y, coordinates=False):\n \"\"\"\n Modify farm layout in aligned wind turbines with constant spacing,\n differing only from rows to columns. Flow field is also reinitialized.\n\n Args\n ----\n n_row: (float) number of turbine rows\n n_col: (float) number of turbine columns\n spac_x: (float) WT diam normalized turbines distance in x direction\n spac_y: (float) WT diam normalized turbines distance in y direction\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Input type check\n if not all(isinstance(i, int) for i in [n_row, n_col]) or \\\n not all(isinstance(j, (int, float)) for j in [spac_x, spac_y]):\n err_msg = \"Incorrect input value types\"\n raise ValueError(err_msg)\n\n # Calculate new coordinate farm layout\n layout_x = []\n layout_y = []\n for i in range(int(n_row)):\n for j in range(int(n_col)):\n layout_x.append(i * spac_x * self.D)\n layout_y.append(j * spac_y * self.D)\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def set_HR_layout(self, coordinates=False):\n \"\"\"\n Set Horns Rev wind farm layout to wind farm object and\n returns turbines' x and y coordinates if coordinates=True.\n\n Args\n ----\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Vestas V80 2 MW diameter check\n if self.D != 80:\n warning = \"Rotor diameter not from the Vestas V80 2 MW turbine\"\n warnings.warn(warning, UserWarning)\n\n n_rows = 10\n n_cols = 8\n spac_x = 7\n spac_y = 7\n angle = 6\n layout_x = []\n layout_y = []\n for i in range(int(n_rows)):\n for j in range(int(n_cols)):\n layout_x.append((i * spac_x * self.D) -\n (np.sin(np.radians(angle)) * j * spac_y * self.D))\n layout_y.append(j * spac_y * self.D * np.cos(np.radians(angle)))\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def farm_eval(self, yaw=None, ws=None, wd=None, ti=None, shear=None):\n \"\"\"\n Calculate farm flow field for given wind farm layout and input conditions.\n Return main outputs, such as yaw angles, turbines power, farm power, etc.\n\n Args\n ----\n yaw: (list, optional) turbines yaw angles (deg). Default to None.\n ws: (float, optional) input wind speeds (m/s). Default to None.\n wd: (float, optional) input wind directions (deg). 
Default to None.\n ti: (float, optional) input turbulence intensity. Default to None.\n shear: (float, optional) shear exponent. Default to None.\n\n Returns\n -------\n wf_pow: (float) WF power (MWatts).\n wt_pow: (np.array) WTs power (MWatts).\n wt_ti: (list) WTs turbulence intensity.\n wt_yaw: (np.array) WTs yaw angles (deg).\n \"\"\"\n # Main wind farm calculation\n wf_pow, wt_pow, wt_ti, wt_yaw, _ = floris_farm_eval(self,\n yaw,\n ws,\n wd,\n ti,\n shear)\n\n return (wf_pow, wt_pow, wt_ti, wt_yaw)\n\n def pow_yaw_sweep_1var(self, layout, var_info):\n \"\"\"\n Return wind farm power for a single yaw variable, either a\n single turbine or a single row of turbines. Sweep by row not possible\n for not aligned \"custom\" layouts.\n\n Args\n ----\n layout: (tuple)\n row: (integer) number of farm rows\n cols: (integer) number of farm columns\n or string \"custom\"\n var_info: (tuple)\n var_type: (string) \"T\" for turbine,\n \"R\" for row (not for custom layouts)\n var: (integer) turbine or row number\n var_value: (list of floats) variable values\n\n Returns\n -------\n obj_out: tuple\n obj: (list) objective values\n obj_func: (string) objective function\n var_info: (tuple) see input\n model: (string) model name\n \"\"\"\n # Extract inputs and check inputs\n var_type, var, var_value = var_info\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'R' and layout == \"custom\":\n err_msg = \"Row not allowed for custom layouts\"\n raise ValueError(err_msg)\n if var_type == 'R' and var > rows:\n err_msg = \"Row specified not in farm\"\n raise ValueError(err_msg)\n if var_type == 'T' and var > self.n_turbs:\n err_msg = \"Turbine specified not in farm\"\n raise ValueError(err_msg)\n\n # Calculations\n yaw_angles = np.array(floris_current_yaw(self))\n wf_pow = []\n\n for yaw_change in var_value:\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'T':\n yaw_angles[(var-1)] = yaw_change\n elif var_type == 'R':\n idx_1 = var*cols\n idx_0 = idx_1-cols\n yaw_angles[idx_0:idx_1] = yaw_change\n else:\n err_msg = \"var_type either 'T' or 'R'\"\n raise ValueError(err_msg)\n\n wf_pow_single, _, _, _ = self.farm_eval(yaw=yaw_angles)\n wf_pow.append(wf_pow_single)\n\n obj_out = (wf_pow, 'Farm Power')\n var_info = (var_type, var, var_value)\n print(\"Function exploration complete\")\n\n return obj_out, var_info"
},
{
"identifier": "WSOpt",
"path": "deasc/wake_steering.py",
"snippet": "class WSOpt:\n \"\"\"\n Class to perform wake steering optimization with a WfModel object, given an a-priori\n specified wind farm layout and specified atmopheric conditions. Optimization can have\n all/some turbines as variables, or rows for wind farms with equal columns. Optimizers\n available are the local SLSQP, where linear constraints can be added, and the global\n optimizer TuRBO.\n \"\"\"\n\n def __init__(self,\n wf_model,\n inflow,\n variables,\n var_bounds,\n var_initial,\n opt_method=\"SLSQP\",\n opt_options=None,\n obj_function=\"Farm Power\",\n constraints=(None, None, None),\n by_row=(False, None, None),\n tuning_dynamic=False\n ):\n \"\"\"\n Args\n ----\n wf_model: (WfModel)\n WfModel to perform wake steering optimization.\n inflow: (list) Inflow conditions for wake steering optimization.\n yaw_initial: (list) wind farm yaw angles (deg).\n (string) 'random' for random intial wind farm yaw angles.\n wd: (float) input wind directions (deg).\n ws: (float) input wind speeds (m/s).\n ti: (float) input turbulence intensity.\n shear: (float) shear exponent.\n variables: (list)\n List of turbines (or rows) to optimize. Naming convention starts from 1.\n var_bounds: (tuple)\n low_bound: (float) variable (yaw angle) lower bound.\n upp_bound: (float) variable (yaw angle) upper bound.\n var_initial:\n SLSQP: (list) list of initial variable values for each variable.\n (string) 'random' for random initial variable values.\n TURBO_1: (list of lists) list of n_init variable values lists\n (see TURBO_1 options).\n (string) 'LHS' latin hypercube sampling.\n TURBO_M: (string) 'LHS' latin hypercube sampling.\n opt_method: (string, optional) optimization method.\n 'SLSQP', 'TURBO_1 and 'TURBO_M' available.\n Default set to 'SLSQP'.\n opt_options: (dict , optional) optimization method options dictionary.\n Default set to None.\n opt_function: (string , optional) objective function. 'Farm Power' available\n Default set to 'Farm Power'.\n constraints: (tuple) Linear constraints definition. Limited to SLSQP.\n A: (matrix) linear constraint matrix.\n Default set to None.\n low_bound_constr: (float) lower non-normalized contraint bound.\n Default set to None.\n upp_bnd_constr: (float) upper non-normalized contraint bound.\n Default set to None.\n by_row : (tuple, optional) Optimization by row, requires all farm columns to have\n the same amount of rows.\n by_row_bool: (bool) True if optimization variables are wind farm rows,\n False if wind farm turbines. Default set to False.\n rows:: (int) wind farm rows. Default set to None.\n cols:: (int) wind farm columns. Default set to None.\n tuning_dynamic : (bool, optional)\n If True, include dynamic parameter tuning. See tuning_dynamic_initialize\n method. 
Default to False.\n \"\"\"\n # Opt Methods - Opt Options - Optimizers - Opt Functions\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\", \"TURBO_M\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-6,\n 'eps': 0.01},\n \"TURBO_1\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"},\n \"TURBO_M\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"n_trust_regions\": 2,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.optimizer_dict = {'SLSQP': self._optimizer_scipy,\n 'TURBO_1': self._optimizer_turbo_1,\n 'TURBO_M': self._optimizer_turbo_m}\n self.obj_function_dict = {'Farm Power': self._obj_function_power}\n\n # Optimization methods and optimizer\n self.opt_method = opt_method\n self._opt_method_settler()\n self.optimizer = self.optimizer_dict[self.opt_method]\n\n # Optimizer options\n self.opt_options = opt_options\n self._opt_options_settler()\n\n # Optimization function\n self.obj_function_name = obj_function\n self._obj_function_settler()\n\n # Wind farm conditions\n self.wf_model = wf_model\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.yaw_initial, self.wd, self.ws, self.ti, self.shear = inflow\n if not isinstance(self.yaw_initial, (list, np.ndarray)):\n if self.yaw_initial == 'random':\n self.yaw_initial = self._random_yaw_generator(self.wf_model.n_turbs,\n var_bounds)\n self._yaw_initial_input_handler()\n self.yaw_initial = np.array([float(item) for item in self.yaw_initial])\n\n # Optimization per wind turbine or per wind farm row\n self.by_row_bool = by_row[0]\n if self.by_row_bool:\n self.rows = by_row[1]\n self.cols = by_row[2]\n self._by_row_input_handler()\n\n # Variable bounds\n self.var_bounds = var_bounds\n self.low_bound, self.upp_bound = self.var_bounds\n self.low_bound_norm = norm(self.low_bound, self.low_bound, self.upp_bound)\n self.upp_bound_norm = norm(self.upp_bound, self.low_bound, self.upp_bound)\n self.var_bounds_norm = (self.low_bound_norm, self.upp_bound_norm)\n tmp = [self.var_bounds_norm for i in range(len(variables))]\n self.var_bounds_norm_list = tmp\n tmp = np.array([self.low_bound_norm for i in range(len(variables))])\n self.low_bound_norm_list = tmp\n tmp = np.array([self.upp_bound_norm for i in range(len(variables))])\n self.upp_bound_norm_list = tmp\n\n # Constraints\n self.A = constraints[0]\n self.low_bound_constr = constraints[1]\n self.upp_bound_constr = constraints[2]\n if self.A is not None:\n self._constraints_input_handler()\n self.low_bound_constr_norm = norm(self.low_bound_constr,\n self.low_bound,\n self.upp_bound)\n self.upp_bound_constr_norm = norm(self.upp_bound_constr,\n self.low_bound,\n self.upp_bound)\n\n # Yaw variables\n self.variables = variables\n self.var_initial = var_initial\n self._variables_input_handler()\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.opt_method == 'SLSQP' and self.var_initial == 'random':\n self.var_initial = self._random_yaw_generator(len(self.variables),\n self.var_bounds)\n self._var_initial_input_handler()\n self.var_initial_norm = self._var_initial_norm()\n\n # Dynamic tuning\n self.tuning_dyn_bool = tuning_dynamic\n 
self._tuning_dyn_bool_check()\n self.tuning_dyn_initialization = False\n\n self.opt_run = False\n\n def tuning_dyn_initialize(self, tuning_dyn_obj_list):\n \"\"\"\n Assign list of tuning dynamic objects TuningDyn to the WSOpt object.\n\n Args\n ----\n tuning_dyn_object: (list of TuningDyn objects)\n \"\"\"\n self.tuning_dyn_obj_list = tuning_dyn_obj_list\n self._tuning_dyn_init_input_handler()\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n tuning_dyn_obj.wso_compatibility_check(self)\n self.tuning_dyn_initialization = True\n\n def optimize_yaw(self):\n \"\"\"\n Optimize the yaw angle for the given WSOpt object.\n\n Returns\n -------\n opt_yaw_angles_vars: (ndarray) optimal yaw angles for the optimization variables.\n opt_yaw_angles_all: (ndarray) optimal yaw angles for all.wind farm turbines.\n \"\"\"\n # Tuning dynamic initialization check\n self._tuning_dyn_initialization_check()\n\n # Print optimization info\n self._print_info()\n\n # Wind farm power - no yaw\n self.wf_pow_noyaw = self._get_farm_power_noyaw()\n\n # Optimize\n self._iter_details_setup()\n self.opt_yaw_angles_vars, self.opt_yaw_angles_all = self.optimizer()\n self.opt_run = True\n\n return (self.opt_yaw_angles_vars, self.opt_yaw_angles_all)\n\n def get_optimization_details(self):\n \"\"\"\n Return optimization details: optimizer iterations details and objective function\n evaluations details. The two are identical for TURBO optimizers as an objective\n function evaluation corresponds to an optimizer iteration, different for SLSQP as\n additional objective function evaluations are required to approximate gradients.\n\n Returns\n -------\n iter_details: (tuple) optimizer iterations details.\n iter_yaw_angles: (list) list of yaw angles per optimizer iteration.\n iter_obj_func: (list) list of objective function per optimizer iteration.\n iter_farm_power: (list) list of farm power values per optimizer iteration.\n eval_details: (tuple) objective fucntion evaluations details.\n eval_yaw_angles: (list) list of yaw angles per evaluation.\n eval_obj_func: (list) list of objective function per evaluation.\n eval_farm_power: (list) list of farm power values per evaluation.\n \"\"\"\n iter_details = (self.iter_yaw_angles,\n self.iter_obj_func,\n self.iter_farm_power)\n eval_details = (self.eval_yaw_angles,\n self.eval_obj_func,\n self.eval_farm_power)\n return (iter_details, eval_details)\n\n # %% Private methods\n\n def _opt_method_settler(self):\n if self.opt_method not in self.opt_method_list:\n err_msg = \"Optimization method not recognized\"\n raise Exception(err_msg)\n\n def _opt_options_settler(self):\n if self.opt_options is None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n\n def _obj_function_settler(self):\n if self.obj_function_name in list(self.obj_function_dict.keys()):\n self.obj_function = self.obj_function_dict[self.obj_function_name]\n else:\n err_msg = \"Optimization function not recognized\"\n raise Exception(err_msg)\n\n def _random_yaw_generator(self, yaw_number, yaw_bounds):\n yaw_angles = []\n for i in range(yaw_number):\n x = random.choice(range(yaw_bounds[0], yaw_bounds[1]+1))\n yaw_angles.append(x)\n return yaw_angles\n\n def _yaw_initial_input_handler(self):\n if len(self.yaw_initial) != self.wf_model.n_turbs:\n err_msg = \"Initial yaw angles do not match turbine number\"\n raise Exception(err_msg)\n\n def _by_row_input_handler(self):\n if self.rows*self.cols != self.wf_model.n_turbs:\n err_msg = \"Farm rows and columns provided do not match turbine number\"\n raise 
Exception(err_msg)\n\n def _constraints_input_handler(self):\n if self.opt_method != 'SLSQP':\n err_msg = \"Linear constraints (on top of bounds) limited to SLSQP optimizer\"\n raise Exception(err_msg)\n\n def _variables_input_handler(self):\n if self.by_row_bool:\n for row in self.variables:\n if row > self.rows:\n err_msg = \"Row/s specified not in farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.rows:\n err_msg = \"Too many rows specified\"\n raise Exception(err_msg)\n else:\n for turb in self.variables:\n if turb > self.wf_model.n_turbs:\n err_msg = \"Turbine/s specified not in the farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.wf_model.n_turbs:\n err_msg = \"Too many turbines specified\"\n raise Exception(err_msg)\n if 0 in self.variables:\n err_msg = \"Turbine/row counting convention starts from 1\"\n raise Exception(err_msg)\n\n def _var_initial_input_handler(self):\n if self.opt_method == 'TURBO_1':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n pass\n elif self.var_initial == 'random':\n err_msg = \"Random initial variables limited to SLSQP optimizer\"\n raise Exception(err_msg)\n else:\n if len(self.var_initial) != self.opt_options[\"n_init\"]:\n err_msg = \"n_init initial variable lists are needed (see TURBO options)\"\n raise Exception(err_msg)\n elif len(self.var_initial[0]) != len(self.variables):\n err_msg = \"var_initial sublists length not equal number of variables\"\n raise Exception(err_msg)\n elif self.opt_method == 'TURBO_M':\n if self.var_initial != 'LHS':\n err_msg = \"TURBO_M optimizer requires LHS as initial sampling\"\n elif self.opt_method == 'SLSQP':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n err_msg = \"Latin Hypercube Sampling limited to TURBO optimizers\"\n raise Exception(err_msg)\n elif len(self.variables) != len(self.var_initial):\n err_msg = \"var_initial length needs to equal number of variables\"\n raise Exception(err_msg)\n\n def _var_initial_norm(self):\n if self.opt_method == \"SLSQP\":\n self.var_initial = np.array([float(item) for item in self.var_initial])\n var_initial_norm = norm(self.var_initial, self.low_bound, self.upp_bound)\n elif self.var_initial == 'LHS':\n var_initial_norm = None\n else:\n self.var_initial = np.array([np.array(x) for x in self.var_initial])\n var_initial_norm = []\n for x_list in self.var_initial:\n x_list_norm = []\n for x in x_list:\n x_norm = norm(x, self.low_bound, self.upp_bound)\n x_list_norm.append(x_norm)\n var_initial_norm.append(np.array(x_list_norm))\n return np.array(var_initial_norm)\n\n def _get_farm_power_noyaw(self):\n if (self.tuning_dyn_initialization and\n hasattr(self.tuning_dyn_obj_list[0], 'wf_pow_noyaw')):\n wf_pow_noyaw = self.tuning_dyn_obj_list[0].wf_pow_noyaw\n else:\n self.yaw_zero = np.full(shape=self.wf_model.n_turbs, fill_value=0.0)\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n # Tune parameters\n if self.tuning_dyn_initialization:\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, self.yaw_zero)\n\n wf_pow_noyaw = floris_calculate_farm_power(self.wf_model, self.yaw_zero)\n return wf_pow_noyaw\n\n def _print_info(self):\n print(\"=====================================================\")\n print(\"Optimizing wake redirection control...\")\n print(\"Optimization method: %s\" % (self.opt_method))\n print(\"Optimization function: %s \\n\" % 
(self.obj_function_name))\n if self.by_row_bool:\n print(\"Rows being optimized: \")\n print(self.variables)\n else:\n print(\"Turbines being optimized: \")\n print(self.variables)\n print(\"Number of variables to optimize = \", len(self.variables))\n print(\"=====================================================\")\n\n def _iter_details_setup(self):\n # Details for each obj function evaluation\n self.eval_yaw_angles = [] # deg\n self.eval_obj_func = []\n self.eval_farm_power = [] # MW\n\n # Details for each optimizer iteration\n self.iter_yaw_angles = [] # deg\n self.iter_obj_func = []\n self.iter_farm_power = [] # MW\n\n def _variables_to_farm_yaw(self, yaw_initial, var_values):\n yaw_angles = copy.deepcopy(yaw_initial)\n if self.by_row_bool:\n for i, row_idx in enumerate(self.variables):\n idx_1 = row_idx*self.cols\n idx_0 = idx_1-self.cols\n yaw_angles[idx_0:idx_1] = var_values[i]\n else:\n for i, turb_idx in enumerate(self.variables):\n yaw_angles[turb_idx-1] = var_values[i]\n return yaw_angles.tolist()\n\n # %% Optimizers\n\n def _optimizer_scipy(self):\n # Call back function for iter details\n def callback_func(xk):\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n # Linearly constrained case\n if self.A is not None:\n self.C = LinearConstraint(self.A,\n self.low_bound_constr_norm,\n self.upp_bound_constr_norm)\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n constraints=(self.C,),\n options=self.opt_options)\n # Unconstrained case\n else:\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n options=self.opt_options)\n # Extract optimal yaw angles for variables\n opt_yaw_angles_vars = unnorm(self.residual_plant.x,\n self.low_bound,\n self.upp_bound)\n # Extract optimal yaw angles for the entire farm\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Use best index because if total iterations reached, optimum not last evaluation\n eval_yaw_angles_lists = [x.tolist() for x in self.eval_yaw_angles]\n index_best = eval_yaw_angles_lists.index(opt_yaw_angles_all)\n opt_yaw_angles_all = np.array(opt_yaw_angles_all)\n self.obj_func_opt = self.eval_obj_func[index_best]\n self.farm_power_opt = self.eval_farm_power[index_best]\n\n # Add initial and last points to iteration details\n self.iter_yaw_angles.insert(0, self.eval_yaw_angles[0])\n self.iter_obj_func.insert(0, self.eval_obj_func[0])\n self.iter_farm_power.insert(0, self.eval_farm_power[0])\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_1(self):\n\n # TURBO initial sampling\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n X_init_provided = False\n X_init_same_norm = None\n else:\n X_init_provided = True\n X_init_same_norm = self.var_initial_norm\n\n # TURBO 
optimization\n turbo_1 = Turbo1(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n X_init_provided=X_init_provided,\n X_init_same=X_init_same_norm,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.obj_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_m(self):\n\n # TURBO optimization\n turbo_m = TurboM(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n )\n turbo_m.optimize()\n X = turbo_m.X # Evaluated points\n fX = turbo_m.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.cost_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n # %% Objective functions\n\n def _obj_function_power(self, var_norm):\n\n # Extract farm yaw angles\n var_unnorm = unnorm(var_norm, self.low_bound, self.upp_bound)\n yaw_angles = self._variables_to_farm_yaw(self.yaw_initial, var_unnorm)\n yaw_angles = np.array([float(item) for item in yaw_angles])\n\n # Tune parameters dynamically\n if self.tuning_dyn_initialization:\n # Set equal yaw angles in groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n yaw_angles = self.tuning_dyn_obj_list[0].set_yaw_groups(yaw_angles)\n # Tune parameters\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, yaw_angles)\n\n # Calculate negative of the farm power normalized by power for zero yaw\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n wf_pow = floris_calculate_farm_power(self.wf_model, yaw_angles)\n obj_function = (-1 * wf_pow / self.wf_pow_noyaw)\n\n # Update evalauation details\n 
self.eval_yaw_angles.append(yaw_angles)\n self.eval_obj_func.append(obj_function)\n self.eval_farm_power.append(wf_pow)\n\n return obj_function\n\n # %% Tuning Dynamic methods\n\n def _tuning_dyn_bool_check(self):\n if self.tuning_dyn_bool and self.by_row_bool:\n err_msg = \"Dynamic tuning not available for optimization by row.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_init_input_handler(self):\n if isinstance(self.tuning_dyn_obj_list, (list, np.ndarray)) is False:\n err_msg = \"TuningDyn objects need to be in a list, even if only one.\"\n raise Exception(err_msg)\n # Check dynamic grouping tuning objects have the same tuning groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n tuning_groups_first = self.tuning_dyn_obj_list[0].tuning_groups\n same_groups = all(obj.tuning_groups == tuning_groups_first\n for obj in self.tuning_dyn_obj_list)\n if same_groups is False:\n err_msg = \"TuningDyn objects have different groupings.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_initialization_check(self):\n if self.tuning_dyn_bool and self.tuning_dyn_initialization is False:\n err_msg = \"Tuning dynamic not initialized. See tuning_dyn_initialize method.\"\n raise Exception(err_msg)"
}
] | import numpy as np
from deasc import WfModel
from deasc import WSOpt | 8,358 |
"""
This example shows wake steering optimisation on a 3x3 wind farm of NREL 5 MW turbines.
The initial conditions are 0 deg for all wind turbines. The optimisation variables are
all turbines, except the last, most downstream row. The optimiser is TURBO.
"""
# Input file definition
path = "./inputs/"
input_file = "gch.yaml"
# Initialise wind farm model
|
"""
This example shows wake steering optimisation on a 3x3 wind farm of NREL 5 MW turbines.
The initial conditions are 0 deg for all wind turbines. The optimisation variables are
all turbines, except the last, most downstream row. The optimiser is TURBO.
"""
# Input file definition
path = "./inputs/"
input_file = "gch.yaml"
# Initialise wind farm model | wf_model = WfModel(input_file, path) | 0 | 2023-11-10 18:13:27+00:00 | 12k |
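Assembled from the separate columns of this row, the example's runnable prefix and its recorded ground-truth next line read as shown below. Every line is taken verbatim from the row above; the source script continues beyond the marked line, so WSOpt is imported but not yet used at this point.

import numpy as np
from deasc import WfModel
from deasc import WSOpt

"""
This example shows wake steering optimisation on a 3x3 wind farm of NREL 5 MW turbines.
The initial conditions are 0 deg for all wind turbines. The optimisation variables are
all turbines, except the last, most downstream row. The optimiser is TURBO.
"""
# Input file definition
path = "./inputs/"
input_file = "gch.yaml"

# Initialise wind farm model
wf_model = WfModel(input_file, path)  # <- the row's recorded next line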
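The WSOpt class in this row's context snippet normalises yaw variables with norm/unnorm helpers and maps variable indices (counted from 1) onto the farm-wide yaw array in _variables_to_farm_yaw. The self-contained sketch below illustrates that logic for the 3x3 example; the norm/unnorm implementations, the chosen variable list, and the +/-25 deg bounds are assumptions inferred from how the helpers are called in the snippet, not the actual deasc code.

import numpy as np

# Assumed linear scaling helpers, inferred from calls such as
# norm(x, self.low_bound, self.upp_bound) in the WSOpt snippet;
# the real deasc implementations may differ.
def norm(val, low, upp):
    # Map a value from [low, upp] to [0, 1].
    return (np.asarray(val, dtype=float) - low) / (upp - low)

def unnorm(val_norm, low, upp):
    # Map a normalised value from [0, 1] back to [low, upp].
    return np.asarray(val_norm, dtype=float) * (upp - low) + low

def variables_to_farm_yaw(yaw_initial, variables, var_values):
    # Mirrors WSOpt._variables_to_farm_yaw for the turbine-wise case:
    # turbine counting starts from 1, so turbine k writes to index k - 1.
    yaw_angles = list(yaw_initial)
    for i, turb_idx in enumerate(variables):
        yaw_angles[turb_idx - 1] = float(var_values[i])
    return yaw_angles

# 3x3 farm from the example: 9 turbines, all initially at 0 deg yaw.
yaw_initial = [0.0] * 9
# Optimise all turbines except the last (most downstream) row, i.e. turbines 1-6.
variables = [1, 2, 3, 4, 5, 6]
# Illustrative optimiser output in normalised space, mapped back to degrees
# with assumed +/-25 deg yaw bounds.
var_norm = np.array([0.9, 0.8, 0.9, 0.7, 0.6, 0.7])
var_deg = unnorm(var_norm, -25.0, 25.0)
assert np.allclose(norm(var_deg, -25.0, 25.0), var_norm)  # round-trip check
print(variables_to_farm_yaw(yaw_initial, variables, var_deg))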